First code drop for x86-64 support; further cleanups still required.
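
For reference, a minimal sketch of configuring a 64-bit dom0 build from this
sparse tree (assuming the usual ARCH=xen kbuild flow; the defconfig names are
taken from the files added below):

    cp arch/xen/configs/xen0_defconfig_x86_64 .config
    make ARCH=xen oldconfig && make ARCH=xen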
Signed-off-by: Asit Mallick <asit.k.mallick@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: Benjamin Liu <Benjamin.liu@intel.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.11-xen-sparse/arch/xen/Makefile
40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.11-xen-sparse/arch/xen/boot/Makefile
40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
+424f001dsaMEQ1wWQnPmu0ejo6pgPA linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
+424f001dsBzCezYZD8vAn-h5D9ZRtQ linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig
40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.11-xen-sparse/arch/xen/i386/Makefile
40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile
414c113396tK1HTVeUalm3u-1DF16g linux-2.6.11-xen-sparse/arch/xen/kernel/skbuff.c
418f90e4lGdeJK9rmbOB1kN-IKSjsQ linux-2.6.11-xen-sparse/arch/xen/kernel/smp.c
3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.11-xen-sparse/arch/xen/kernel/xen_proc.c
+424efaa6xahU2q85_dT-SjUJEaivfg linux-2.6.11-xen-sparse/arch/xen/x86_64/Kconfig
+424efaa6kKleWe45IrqsG8gkejgEQA linux-2.6.11-xen-sparse/arch/xen/x86_64/Makefile
+424efaa7hjpGexXRf5TYBwgP7uT5Wg linux-2.6.11-xen-sparse/arch/xen/x86_64/defconfig
+424efaa6HSyuVodl6SxFGj39vlp6MA linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/Makefile
+424efaa7bVAw3Z_q0SdFivfNVavyIg linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/asm-offsets.c
+424efaa7ddTVabh547Opf0u9vKmUXw linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/e820.c
+424efaa72fQEHYQ-Sp2IW9X2xTA5zQ linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/early_printk.c
+424efaa7B_BWrAkLPJNoKk4EQY2a7w linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S
+424efaa7vhgi7th5QVICjfuHmEWOkw linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S
+424efaa7tiMEZSAYepwyjaNWxyXF7Q linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head64.c
+424efaa6M6AGf53TJa2y9cl6coos0g linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/init_task.c
+424efaa6wHXXaloZygAv6ywDb7u7nQ linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ioport.c
+424efaa6gOkc9_uHCLgvY_DXPqh_sg linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/irq.c
+424efaa6ibN3xXEeXoxURmKfJF_CUA linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c
+424efaa6aX4JkXAzBf4nqxRmLUfhqQ linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c
+424efaa7e8nVw04q-pK8XRFaHPVx_A linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-nommu.c
+424efaa7CxY9cbhqapUfqVYnD7T9LQ linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
+424efaa7I-DPzj1fkZeYPJS7rA4FAw linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c
+424efaa7DIVTR1U4waPGHucha9Xilg linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c
+424efaa6L1lrzwCIadTNxogSvljFwg linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/signal.c
+424efaa61XzweJyW3v5Lb9egpe3rtw linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smp.c
+424efaa778MkpdkAIq0An1FjQENN_Q linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
+424efaa7vzbNdhwhkQPhs1V7LrAH4Q linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/time.c
+424efaa7szEu90xkjpXk5TufZxxa4g linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c
+424efaa6sJsuHdGIGxm0r-ugsss3OQ linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/vsyscall.c
+424efaa6xbX9LkKyaXvgbL9s_39Trw linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c
+424efaa670zlQTtnOYK_aNgqhmSx-Q linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/Makefile
+424efaa6HUC68-hBHgiWOMDfKZogIA linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/fault.c
+424f001d6S-OiHsk1n9gv1jGwws-9w linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c
+424efaa65ELRJ3JfgQQKLzW6y0ECYQ linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c
+424efaa60dTbHfv65JBLVhNLcNPcRA linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c
+424efaa6uMX8YJASAVJT8ral74dz9Q linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/pageattr.c
+424efaa629XgfZi3vvTAuQmhCqmvIA linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile
+424efaa64SRL9FZhtQovFJAVh9sZlQ linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile-BUS
41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.11-xen-sparse/drivers/Makefile
4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.11-xen-sparse/drivers/char/mem.c
4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.11-xen-sparse/drivers/char/tty_io.c
40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h
40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/vga.h
+424f001delsctIT-_5gdbHsN9VfaQA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/arch_hooks.h
+424efa21QfpO4QqQf9ADB4U_2zo8dQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/bootsetup.h
+424efa21riixePBPesLRsVnhFxfEfQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/desc.h
+424efa21iAXuoKIT3-zDni6aryFlPQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h
+424efa21QCdU7W3An0BM0bboJZ6f4Q linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/fixmap.h
+424efa21S7Ruo0JzTFH1qwezpdtCbw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/floppy.h
+424f001ds3cL9WAgSH5Nja1BAkZfDg linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
+424efa20tMbuEQuxvPjow-wkBx83rA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/io.h
+424efa20meDrUt6I2XWbpuf72e4gEw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/irq.h
+424f001d3cpZoX9SZD_zjTapOs-ZIQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/io_ports.h
+424f001eirTAXdX_1gCugGtzSGJUXw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
+424f001eTD7ATy8MC71Lm2rOHHyUCA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_time.h
+424f001ew4jIwfKeZUNa_U54UAaJcw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h
+424f001ePIPWhBJGeTgj-KmiHOYvqw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h
+424f001e0S9hTGOoEN8pgheQJ76yqQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h
+424f001eQPBrY1621DbCPKn9wK36ZQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h
+424efa21FvJNdHFfm2w2TOWohNsqDQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
+424efa214neirHds4zbtwaefvG5PYA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/page.h
+424efa21-7jaHj-W-T4E9oM3kqFA7Q linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/param.h
+424efa20I76WtOlPh71MaXtai3-qZA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pci.h
+424efa20Fs7EHhAV6Hz_UtifwEfczg linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pda.h
+424efa20CGx-5HD8ahpdHxPW2KlrtA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h
+424efa21YaMjX7hz7eCkVNcNWRK42A linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
+424efa21wPKwwFR1fcqrPD0_o3GKWA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/processor.h
+424efa20fTFqmaE0stH6lfB_4yN_lA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/ptrace.h
+424efa21fY4IvK0luYgDJHKV-MD3eQ linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/segment.h
+424efa21KcupuJlHgmPiTk_T214FrA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/smp.h
+424f001eT-7wjT_7K5AMlNVigIcuHA linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/synch_bitops.h
+424efa210ZRt2U_8WmtyI7g74Nz-4Q linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/system.h
+424f001eBp9fMbZ0Mo2kRJQ84gMgRw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/timer.h
+424efa21Xk2acvaHYnpyTCLE6nU6hw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/tlbflush.h
+424efa21Ey6Q4L4AsXxcEwH3vMDeiw linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/vga.h
+424efa214gNhOfFimFJHq4in24Yp1g linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/xor.h
41af4017PDMuSmMWtSRU5UC9Vylw5g linux-2.6.11-xen-sparse/include/asm-xen/balloon.h
40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.11-xen-sparse/include/asm-xen/ctrl_if.h
40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h
413cb3b53nyOv1OIeDSsCXhBFDXvJA netbsd-2.0-xen-sparse/sys/nfs/files.nfs
413aa1d0oNP8HXLvfPuMe6cSroUfSA patches/linux-2.6.11/agpgart.patch
42372652KCUP-IOH9RN19YQmGhs4aA patches/linux-2.6.11/iomap.patch
+424f001e_M1Tnxc52rDrmCLelnDWMQ patches/linux-2.6.11/x86_64-linux.patch
3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Rules.mk
4209033eUwhDBJ_bxejiv5c6gjXS4A tools/blktap/Makefile
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-xen0
+# Tue Mar 15 10:39:50 2005
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_BLKDEV_BACKEND=y
+# CONFIG_XEN_BLKDEV_TAP_BE is not set
+CONFIG_XEN_NETDEV_BACKEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+# CONFIG_XEN_BLKDEV_TAP is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+# CONFIG_XEN_X86 is not set
+CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+#
+# Code maturity level options
+#
+# CONFIG_EXPERIMENTAL is not set
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_SWAP is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_SYSCTL is not set
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_XENARCH="x86_64"
+CONFIG_MMU=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_MK8 is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_X86_GOOD_APIC=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_SMP is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_MICROCODE is not set
+# CONFIG_X86_CPUID is not set
+# CONFIG_NUMA is not set
+# CONFIG_MTRR is not set
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+
+#
+# X86_64 processor configuration
+#
+CONFIG_X86_64=y
+CONFIG_X86=y
+CONFIG_64BIT=y
+
+#
+# Processor type and features
+#
+# CONFIG_MPSC is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_L1_CACHE_BYTES=128
+# CONFIG_X86_TSC is not set
+# CONFIG_X86_MSR is not set
+# CONFIG_GART_IOMMU is not set
+CONFIG_DUMMY_IOMMU=y
+# CONFIG_X86_MCE is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+# CONFIG_ACPI is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_NAMES is not set
+# CONFIG_PCI_MMCONFIG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats / Emulations
+#
+# CONFIG_IA32_EMULATION is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_CHR_DEV_OSST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+CONFIG_SCSI_SATA_PROMISE=y
+# CONFIG_SCSI_SATA_QSTOR is not set
+CONFIG_SCSI_SATA_SX4=y
+CONFIG_SCSI_SATA_SIL=y
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_ULI is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_PACKET is not set
+# CONFIG_NETLINK_DEV is not set
+# CONFIG_UNIX is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_NETDEVICES is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+# CONFIG_VIDEO_SELECT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVPTS_FS_XATTR=y
+CONFIG_DEVPTS_FS_SECURITY=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_TMPFS_SECURITY=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_EXPORTFS is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=y
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+
+#
+# Firmware Drivers
+#
+CONFIG_PCI_LEGACY_PROC=y
+CONFIG_PCI_NAMES=y
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-xenU
+# Mon Dec 27 10:15:03 2004
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_XEN_PHYSDEV_ACCESS is not set
+# CONFIG_XEN_BLKDEV_BACKEND is not set
+# CONFIG_XEN_NETDEV_BACKEND is not set
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+# CONFIG_XEN_BLKDEV_TAP is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="x86_64"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_UNIX98_PTYS=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+# CONFIG_DEVPTS_FS_SECURITY is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
--- /dev/null
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+# Note: ISA is disabled and will hopefully never be enabled.
+# If you managed to buy an ISA x86-64 box you'll have to fix all the
+# ISA drivers you need yourself.
+#
+
+menu "X86_64 processor configuration"
+	depends on XEN_X86_64
+
+config XENARCH
+	string
+	default "x86_64"
+
+config X86_64
+ bool
+ default y
+ help
+ Port to the x86-64 architecture. x86-64 is a 64-bit extension to the
+ classical 32-bit x86 architecture. For details see
+ <http://www.x86-64.org/>.
+
+config X86
+ bool
+ default y
+
+config 64BIT
+ def_bool y
+
+
+config MMU
+ bool
+ default y
+
+config ISA
+ bool
+
+config SBUS
+ bool
+
+config RWSEM_GENERIC_SPINLOCK
+ bool
+ default y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config X86_CMPXCHG
+ bool
+ default y
+
+config EARLY_PRINTK
+ bool "Early Printk"
+ default n
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+	  with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
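+
+# Example earlyprintk usage (assumption -- the standard x86-64 boot
+# parameters handled by early_printk.c): boot with "earlyprintk=vga" or
+# "earlyprintk=serial,ttyS0,115200" on the kernel command line.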
+
+config HPET_TIMER
+ bool
+ default n
+ help
+ Use the IA-PC HPET (High Precision Event Timer) to manage
+ time in preference to the PIT and RTC, if a HPET is
+ present. The HPET provides a stable time base on SMP
+ systems, unlike the RTC, but it is more expensive to access,
+ as it is off-chip. You can find the HPET spec at
+ <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
+
+ If unsure, say Y.
+
+config HPET_EMULATE_RTC
+ bool "Provide RTC interrupt"
+ depends on HPET_TIMER && RTC=y
+
+config GENERIC_ISA_DMA
+ bool
+ default y
+
+config GENERIC_IOMAP
+ bool
+ default y
+
+#source "init/Kconfig"
+
+
+menu "Processor type and features"
+
+choice
+ prompt "Processor family"
+ default MK8
+
+config MK8
+ bool "AMD-Opteron/Athlon64"
+ help
+ Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
+
+config MPSC
+ bool "Intel x86-64"
+ help
+ Optimize for Intel IA32 with 64bit extension CPUs
+ (Prescott/Nocona/Potomac)
+
+config GENERIC_CPU
+ bool "Generic-x86-64"
+ help
+ Generic x86-64 CPU.
+
+endchoice
+
+#
+# Define implied options from the CPU selection here
+#
+config X86_L1_CACHE_BYTES
+ int
+ default "128" if GENERIC_CPU || MPSC
+ default "64" if MK8
+
+config X86_L1_CACHE_SHIFT
+ int
+ default "7" if GENERIC_CPU || MPSC
+ default "6" if MK8
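+
+# (X86_L1_CACHE_BYTES is just 1 << X86_L1_CACHE_SHIFT: 128 == 1 << 7 for
+# GENERIC_CPU/MPSC, 64 == 1 << 6 for MK8.)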
+
+config X86_TSC
+ bool
+ default n
+
+config X86_GOOD_APIC
+ bool
+ default y
+
+config MICROCODE
+ tristate "/dev/cpu/microcode - Intel CPU microcode support"
+ ---help---
+	  If you say Y here, you will be able to update the microcode on
+	  Intel processors. You will obviously need the actual microcode
+	  binary data itself, which is not shipped with the Linux kernel.
+
+ For latest news and information on obtaining all the required
+ ingredients for this driver, check:
+ <http://www.urbanmyth.org/microcode/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called microcode.
+ If you use modprobe or kmod you may also want to add the line
+ 'alias char-major-10-184 microcode' to your /etc/modules.conf file.
+
+config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
+ help
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
+ major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
+ MSR accesses are directed to a specific CPU on multi-processor
+ systems.
+
+config X86_CPUID
+ tristate "/dev/cpu/*/cpuid - CPU information support"
+ help
+ This device gives processes access to the x86 CPUID instruction to
+ be executed on a specific processor. It is a character device
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
+# disable it for opteron optimized builds because it pulls in ACPI_BOOT
+config X86_HT
+ bool
+ depends on SMP && !MK8
+ default y
+
+config MATH_EMULATION
+ bool
+
+config MCA
+ bool
+
+config EISA
+ bool
+
+#config X86_IO_APIC
+# bool
+# default n
+
+#config X86_LOCAL_APIC
+# bool
+# default n
+
+config MTRR
+ bool "MTRR (Memory Type Range Register) support"
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+ processor access to memory ranges. This is most useful if you have
+ a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+ allows bus write transfers to be combined into a larger transfer
+ before bursting over the PCI/AGP bus. This can increase performance
+ of image write operations 2.5 times or more. Saying Y here creates a
+ /proc/mtrr file which may be used to manipulate your processor's
+ MTRRs. Typically the X server should use this.
+
+ This code has a reasonably generic interface so that similar
+ control registers on other processors can be easily supported
+ as well.
+
+ Saying Y here also fixes a problem with buggy SMP BIOSes which only
+ set the MTRRs for the boot CPU and not for the secondary CPUs. This
+ can lead to all sorts of problems, so it's good to say Y here.
+
+	  Just say Y here; all x86-64 machines support MTRRs.
+
+ See <file:Documentation/mtrr.txt> for more information.
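+
+# Typical /proc/mtrr usage, per Documentation/mtrr.txt (the addresses are
+# illustrative), e.g. to make a frame buffer write-combining:
+#   echo "base=0xf8000000 size=0x400000 type=write-combining" >| /proc/mtrr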
+
+config SMP
+ bool "Symmetric multi-processing support"
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ If you don't know what to do here, say N.
+
+config PREEMPT
+ bool "Preemptible Kernel"
+ ---help---
+ This option reduces the latency of the kernel when reacting to
+ real-time or interactive events by allowing a low priority process to
+ be preempted even if it is in kernel mode executing a system call.
+ This allows applications to run more reliably even when the system is
+	  under load. On the contrary, it may also break your drivers and add
+ priority inheritance problems to your system. Don't select it if
+ you rely on a stable system or have slightly obscure hardware.
+ It's also not very well tested on x86-64 currently.
+ You have been warned.
+
+ Say Y here if you are feeling brave and building a kernel for a
+ desktop, embedded or real-time system. Say N if you are unsure.
+
+config SCHED_SMT
+ bool "SMT (Hyperthreading) scheduler support"
+ depends on SMP
+	default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+ when dealing with Intel Pentium 4 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
+config K8_NUMA
+ bool "K8 NUMA support"
+ select NUMA
+ depends on SMP
+ help
+	  Enable NUMA (Non-Uniform Memory Access) support for
+ AMD Opteron Multiprocessor systems. The kernel will try to allocate
+ memory used by a CPU on the local memory controller of the CPU
+ and add some more NUMA awareness to the kernel.
+ This code is recommended on all multiprocessor Opteron systems
+ and normally doesn't hurt on others.
+
+config NUMA_EMU
+ bool "NUMA emulation support"
+ select NUMA
+ depends on SMP
+ help
+ Enable NUMA emulation. A flat machine will be split
+ into virtual nodes when booted with "numa=fake=N", where N is the
+ number of nodes. This is only useful for debugging.
+
+config DISCONTIGMEM
+ bool
+ depends on NUMA
+ default y
+
+config NUMA
+ bool
+ default n
+
+config HAVE_DEC_LOCK
+ bool
+ depends on SMP
+ default y
+
+# actually 64 maximum, but you need to fix the APIC code first
+# to use clustered mode or whatever your big iron needs
+config NR_CPUS
+ int "Maximum number of CPUs (2-8)"
+ range 2 8
+ depends on SMP
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+	  kernel will support. The maximum supported value is 8 and the
+ minimum value which makes sense is 2.
+
+ This is purely to save memory - each supported CPU requires
+ memory in the static kernel configuration.
+
+config GART_IOMMU
+ bool "IOMMU support"
+ depends on PCI
+ help
+ Support the K8 IOMMU. Needed to run systems with more than 4GB of memory
+ properly with 32-bit PCI devices that do not support DAC (Double Address
+ Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
+ Normally the kernel will take the right choice by itself.
+ If unsure, say Y.
+
+# need this always enabled with GART_IOMMU for the VIA workaround
+config SWIOTLB
+ bool
+ depends on GART_IOMMU
+ default y
+
+config DUMMY_IOMMU
+ bool
+ depends on !GART_IOMMU && !SWIOTLB
+ default y
+ help
+ Don't use IOMMU code. This will cause problems when you have more than 4GB
+ of memory and any 32-bit devices. Don't turn on unless you know what you
+ are doing.
+
+config X86_MCE
+ bool "Machine check support" if EMBEDDED
+ default n
+ help
+ Include a machine check error handler to report hardware errors.
+ This version will require the mcelog utility to decode some
+ machine check error logs. See
+ ftp://ftp.x86-64.org/pub/linux/tools/mcelog
+
+endmenu
+
+#
+# Use the generic interrupt handling code in kernel/irq/:
+#
+config GENERIC_HARDIRQS
+ bool
+ default y
+
+config GENERIC_IRQ_PROBE
+ bool
+ default y
+
+menu "Power management options"
+
+source kernel/power/Kconfig
+
+source "drivers/acpi/Kconfig"
+
+source "arch/x86_64/kernel/cpufreq/Kconfig"
+
+endmenu
+
+menu "Bus options (PCI etc.)"
+
+config PCI
+ bool "PCI support"
+
+# x86-64 doesn't support PCI BIOS access from long mode so always go direct.
+config PCI_DIRECT
+ bool
+ depends on PCI
+ default y
+
+config PCI_MMCONFIG
+ bool "Support mmconfig PCI config space access"
+ depends on PCI
+ select ACPI_BOOT
+
+config UNORDERED_IO
+ bool "Unordered IO mapping access"
+ depends on EXPERIMENTAL
+ help
+ Use unordered stores to access IO memory mappings in device drivers.
+ Still very experimental. When a driver works on IA64/ppc64/pa-risc it should
+ work with this option, but it makes the drivers behave differently
+ from i386. Requires that the driver writer used memory barriers
+ properly.
+
+#source "drivers/pci/Kconfig"
+
+#source "drivers/pcmcia/Kconfig"
+
+#source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+
+menu "Executable file formats / Emulations"
+
+# source "fs/Kconfig.binfmt"
+
+config IA32_EMULATION
+ bool "IA32 Emulation"
+ help
+ Include code to run 32-bit programs under a 64-bit kernel. You should likely
+ turn this on, unless you're 100% sure that you don't have any 32-bit programs
+ left.
+
+config IA32_AOUT
+ bool "IA32 a.out support"
+ depends on IA32_EMULATION
+ help
+ Support old a.out binaries in the 32bit emulation.
+
+config COMPAT
+ bool
+ depends on IA32_EMULATION
+ default y
+
+config SYSVIPC_COMPAT
+ bool
+ depends on COMPAT && SYSVIPC
+ default y
+
+config UID16
+ bool
+ depends on IA32_EMULATION
+ default y
+
+endmenu
+
+# source drivers/Kconfig
+
+# source "drivers/firmware/Kconfig"
+
+# source fs/Kconfig
+
+#source "arch/x86_64/oprofile/Kconfig"
+
+#source "arch/x86_64/Kconfig.debug"
+
+# source "security/Kconfig"
+
+# source "crypto/Kconfig"
+
+# source "lib/Kconfig"
+
+endmenu
+
--- /dev/null
+#
+# x86_64/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713 Artur Skawina <skawina@geocities.com>
+# Added '-march' and '-mpreferred-stack-boundary' support
+# 20000913 Pavel Machek <pavel@suse.cz>
+# Converted for x86_64 architecture
+# 20010105 Andi Kleen, add IA32 compiler.
+# ....and later removed it again....
+# 20050205 Jun Nakajima <jun.nakajima@intel.com>
+# Modified for Xen
+#
+# $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
+
+#
+# early bootup linking needs 32bit. You can either use real 32bit tools
+# here or 64bit tools in 32bit mode.
+#
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
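+# (the $(subst ",,$(CONFIG_XENARCH)) strips the quotes Kconfig puts around
+# string values, turning "x86_64" into x86_64 for use in the paths below)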
+
+IA32_CC := $(CC) $(CPPFLAGS) -m32 -O2 -fomit-frame-pointer
+IA32_LD := $(LD) -m elf_i386
+IA32_AS := $(CC) $(AFLAGS) -m32 -Wa,--32 -traditional -c
+IA32_OBJCOPY := $(CROSS_COMPILE)objcopy
+IA32_CPP := $(CROSS_COMPILE)gcc -m32 -E
+export IA32_CC IA32_LD IA32_AS IA32_OBJCOPY IA32_CPP
+
+
+LDFLAGS := -m elf_x86_64
+#LDFLAGS_vmlinux := -e stext
+
+CHECKFLAGS += -D__x86_64__ -m64
+
+cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+CFLAGS += $(cflags-y)
+
+CFLAGS += -mno-red-zone
+CFLAGS += -mcmodel=kernel
+CFLAGS += -pipe
+# this makes reading assembly source easier, but nominally produces worse
+# code; in practice it actually makes the kernel smaller too.
+CFLAGS += -fno-reorder-blocks
+CFLAGS += -Wno-sign-compare
+ifneq ($(CONFIG_DEBUG_INFO),y)
+CFLAGS += -fno-asynchronous-unwind-tables
+# -fweb shrinks the kernel a bit, but the difference is very small
+# it also messes up debugging, so don't use it for now.
+#CFLAGS += $(call cc-option,-fweb)
+endif
+# -funit-at-a-time shrinks the kernel .text considerably
+# unfortunately it makes reading oopses harder.
+CFLAGS += $(call cc-option,-funit-at-a-time,)
+
+head-y := arch/xen/x86_64/kernel/head.o arch/xen/x86_64/kernel/head64.o arch/xen/x86_64/kernel/init_task.o
+
+libs-y += arch/x86_64/lib/
+core-y += arch/xen/x86_64/kernel/ arch/xen/x86_64/mm/
+core-$(CONFIG_IA32_EMULATION) += arch/xen/x86_64/ia32/
+drivers-$(CONFIG_PCI) += arch/xen/x86_64/pci/
+drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
+
+# for clean
+obj- += kernel/ mm/ pci/
+
+xenflags-y += -Iinclude/asm-xen/asm-x86_64/mach-xen
+
+CFLAGS += $(xenflags-y)
+AFLAGS += $(xenflags-y)
+
+prepare: include/asm-$(XENARCH)/asm_offset.h
+CLEAN_FILES += include/asm-$(XENARCH)/asm_offset.h
+
+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
+ include/linux/version.h include/config/MARKER
+
+
+include/asm-$(XENARCH)/asm_offset.h: arch/xen/x86_64/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
+ ln -fsn asm_offset.h include/asm-$(XENARCH)/offset.h
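+
+# (Sketch of the flow above: kbuild builds asm-offsets.s from asm-offsets.c,
+# the gen-asm-offsets filechk rule converts it into asm_offset.h, and
+# offset.h is kept as a symlink so either include name works.)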
+
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-xen0
+# Fri Mar 11 14:30:42 2005
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_BLKDEV_BACKEND=y
+CONFIG_XEN_NETDEV_BACKEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+# CONFIG_XEN_BLKDEV_TAP is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+# CONFIG_XEN_X86 is not set
+CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_XENARCH="x86_64"
+CONFIG_MMU=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_MK8 is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_X86_GOOD_APIC=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_SMP is not set
+# CONFIG_PREEMPT is not set
+CONFIG_MICROCODE=y
+CONFIG_X86_CPUID=y
+# CONFIG_NUMA is not set
+# CONFIG_MTRR is not set
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+
+#
+# X86_64 processor configuration
+#
+CONFIG_X86_64=y
+CONFIG_X86=y
+CONFIG_64BIT=y
+
+#
+# Processor type and features
+#
+# CONFIG_MPSC is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_L1_CACHE_BYTES=128
+# CONFIG_X86_TSC is not set
+# CONFIG_X86_MSR is not set
+# CONFIG_GART_IOMMU is not set
+CONFIG_DUMMY_IOMMU=y
+# CONFIG_X86_MCE is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+# CONFIG_ACPI is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Bus options (PCI etc.)
+#
+# CONFIG_PCI_MMCONFIG is not set
+# CONFIG_UNORDERED_IO is not set
+
+#
+# Executable file formats / Emulations
+#
+# CONFIG_IA32_EMULATION is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=m
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+CONFIG_IDEDISK_MULTI_MODE=y
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+CONFIG_BLK_DEV_IDESCSI=y
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+# CONFIG_SCSI_SATA_PROMISE is not set
+# CONFIG_SCSI_SATA_SX4 is not set
+# CONFIG_SCSI_SATA_SIL is not set
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_ULI is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA6322 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_FWMARK=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_CT_ACCT=y
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+CONFIG_IP_NF_CT_PROTO_SCTP=m
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_PHYSDEV=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+CONFIG_IP_NF_MATCH_SCTP=m
+CONFIG_IP_NF_MATCH_COMMENT=m
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_LOCAL=y
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_PHYSDEV=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+
+#
+# Bridge: Netfilter Configuration
+#
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+# CONFIG_ATM_MPOA is not set
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=y
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+CONFIG_NET_DIVERT=y
+# CONFIG_ECONET is not set
+CONFIG_WAN_ROUTER=m
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_CLS_POLICE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+# CONFIG_IRDA_ULTRA is not set
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+
+#
+# Dongle support
+#
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+
+#
+# Old SIR device drivers
+#
+# CONFIG_IRPORT_SIR is not set
+
+#
+# Old Serial dongle support
+#
+
+#
+# FIR device drivers
+#
+# CONFIG_VLSI_FIR is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_BCSP_TXCRC=y
+CONFIG_BT_HCIVHCI=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_ETHERTAP=m
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+CONFIG_HP100=y
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_E100_NAPI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_VIA_RHINE is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_HERMES is not set
+CONFIG_ATMEL=m
+# CONFIG_PCI_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# ATM drivers
+#
+CONFIG_ATM_TCP=m
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_ZATM is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+CONFIG_PPPOE=m
+CONFIG_PPPOATM=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+# CONFIG_IBM_ASM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+# CONFIG_VIDEO_SELECT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+CONFIG_DEVPTS_FS_SECURITY=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_TMPFS_SECURITY=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+# CONFIG_BEFS_DEBUG is not set
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+# CONFIG_HPFS_FS is not set
+CONFIG_QNX4FS_FS=m
+# CONFIG_QNX4FS_RW is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_RPCSEC_GSS_SPKM3=m
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_EXPERIMENTAL is not set
+CONFIG_NCP_FS=m
+CONFIG_NCPFS_PACKET_SIGNING=y
+CONFIG_NCPFS_IOCTL_LOCKING=y
+CONFIG_NCPFS_STRONG=y
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+CONFIG_NCPFS_SMALLDOS=y
+CONFIG_NCPFS_NLS=y
+CONFIG_NCPFS_EXTRAS=y
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY_SECLVL is not set
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+# CONFIG_SECURITY_SELINUX_MLS is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+# CONFIG_PCI_LEGACY_PROC is not set
+# CONFIG_PCI_NAMES is not set
--- /dev/null
+#
+# Makefile for the linux kernel.
+#
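+# CONFIG_XENARCH is a quoted string; the subst strips the quotes so the
+# value can be used in the directory paths below.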
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/kernel
+
+extra-y := head.o head64.o init_task.o
+
+obj-y := process.o signal.o entry.o traps.o \
+ time.o ioport.o ldt.o setup.o \
+ x8664_ksyms.o vsyscall.o \
+ setup64.o e820.o irq.o early_printk.o
+c-obj-y := semaphore.o i387.o sys_x86_64.o \
+ ptrace.o quirks.o syscall.o
+obj-y += ../../i386/kernel/timers/
+
+s-obj-y :=
+
+#obj-$(CONFIG_X86_MCE) += mce.o
+#obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/
+#obj-$(CONFIG_ACPI_BOOT) += acpi/
+obj-$(CONFIG_X86_MSR) += msr.o
+obj-$(CONFIG_MICROCODE) += microcode.o
+obj-$(CONFIG_X86_CPUID) += cpuid.o
+#obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o
+#obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+#obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o \
+# genapic.o genapic_cluster.o genapic_flat.o
+#obj-$(CONFIG_PM) += suspend.o
+#obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
+#obj-$(CONFIG_CPU_FREQ) += cpufreq/
+#obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+#obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
+c-obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
+#obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+
+c-obj-$(CONFIG_MODULES) += module.o
+
+#obj-y += topology.o
+c-obj-y += intel_cacheinfo.o
+
+bootflag-y += ../../../i386/kernel/bootflag.o
+cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../../i386/kernel/cpuid.o
+topology-y += ../../../i386/mach-default/topology.o
+swiotlb-$(CONFIG_SWIOTLB) += ../../../ia64/lib/swiotlb.o
+microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../../i386/kernel/microcode.o
+intel_cacheinfo-y += ../../../i386/kernel/cpu/intel_cacheinfo.o
+quirks-y += ../../../i386/kernel/quirks.o
+
+c-link := init_task.o
+s-link := vsyscall.o
+
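+# Files listed in c-obj-y/s-obj-y and c-link/s-link are not carried in the
+# sparse tree; the rule below symlinks the unmodified sources in from the
+# native arch/x86_64/kernel tree at build time.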
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
+ ln -fsn $(srctree)/arch/x86_64/kernel/$(notdir $@) $@
+
+obj-y += $(c-obj-y) $(s-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/suspend.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/ia32.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
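+
+/*
+ * The build post-processes the compiler's assembly output of this file:
+ * lines beginning with "->" are extracted and rewritten as #define
+ * constants (e.g. tsk_flags), which entry.S and friends then include
+ * via <asm/asm_offset.h>.
+ */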
+
+int main(void)
+{
+#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
+ ENTRY(state);
+ ENTRY(flags);
+ ENTRY(thread);
+ ENTRY(pid);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(threadinfo_ ## entry, offsetof(struct thread_info, entry))
+ ENTRY(flags);
+ ENTRY(addr_limit);
+ ENTRY(preempt_count);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
+ ENTRY(kernelstack);
+ ENTRY(oldrsp);
+ ENTRY(pcurrent);
+ ENTRY(irqrsp);
+ ENTRY(irqcount);
+ ENTRY(cpunumber);
+ ENTRY(irqstackptr);
+ ENTRY(kernel_mode);
+ BLANK();
+#undef ENTRY
+#ifdef CONFIG_IA32_EMULATION
+#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
+ ENTRY(eax);
+ ENTRY(ebx);
+ ENTRY(ecx);
+ ENTRY(edx);
+ ENTRY(esi);
+ ENTRY(edi);
+ ENTRY(ebp);
+ ENTRY(esp);
+ ENTRY(eip);
+ BLANK();
+#undef ENTRY
+ DEFINE(IA32_RT_SIGFRAME_sigcontext,
+ offsetof (struct rt_sigframe32, uc.uc_mcontext));
+ BLANK();
+#endif
+ DEFINE(SIZEOF_PBE, sizeof(struct pbe));
+ DEFINE(pbe_address, offsetof(struct pbe, address));
+ DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+ return 0;
+}
--- /dev/null
+/*
+ * Handle the memory map.
+ * The functions here do the job until bootmem takes over.
+ * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <asm/e820.h>
+#include <asm/proto.h>
+#include <asm/bootsetup.h>
+
+/*
+ * PFN of last memory page.
+ */
+unsigned long end_pfn;
+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
+unsigned long end_pfn_map;
+
+/*
+ * Add a memory region to the kernel e820 map.
+ */
+void __init add_memory_region(unsigned long start, unsigned long size, int type)
+{
+ int x = e820.nr_map;
+
+ if (x == E820MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+}
+
+#if 0
+extern char _end[];
+
+/*
+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
+ * The direct mapping extends to end_pfn_map, so that we can directly access
+ * apertures, ACPI and other tables without having to play with fixmaps.
+ */
+
+/*
+ * Last pfn which the user wants to use.
+ */
+
+extern struct resource code_resource, data_resource;
+
+/* Check for some hardcoded bad areas that early boot is not allowed to touch */
+static inline int bad_addr(unsigned long *addrp, unsigned long size)
+{
+ unsigned long addr = *addrp, last = addr + size;
+
+	/* various gunk below that is needed for SMP startup */
+ if (addr < 0x8000) {
+ *addrp = 0x8000;
+ return 1;
+ }
+
+ /* direct mapping tables of the kernel */
+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
+ *addrp = table_end << PAGE_SHIFT;
+ return 1;
+ }
+
+ /* initrd */
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
+ addr < INITRD_START+INITRD_SIZE) {
+ *addrp = INITRD_START + INITRD_SIZE;
+ return 1;
+ }
+#endif
+ /* kernel code + 640k memory hole (later should not be needed, but
+ be paranoid for now) */
+ if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
+ *addrp = __pa_symbol(&_end);
+ return 1;
+ }
+ /* XXX ramdisk image here? */
+ return 0;
+}
+
+int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ if (type && ei->type != type)
+ continue;
+ if (ei->addr >= end || ei->addr + ei->size < start)
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Find a free area in a specific range.
+ */
+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long addr = ei->addr, last;
+ if (ei->type != E820_RAM)
+ continue;
+ if (addr < start)
+ addr = start;
+ if (addr > ei->addr + ei->size)
+ continue;
+ while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
+ ;
+ last = addr + size;
+ if (last > ei->addr + ei->size)
+ continue;
+ if (last > end)
+ continue;
+ return addr;
+ }
+ return -1UL;
+}
+
+/*
+ * Free bootmem based on the e820 table for a node.
+ */
+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long last, addr;
+
+ if (ei->type != E820_RAM ||
+ ei->addr+ei->size <= start ||
+ ei->addr > end)
+ continue;
+
+ addr = round_up(ei->addr, PAGE_SIZE);
+ if (addr < start)
+ addr = start;
+
+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (last >= end)
+ last = end;
+
+ if (last > addr && last-addr >= PAGE_SIZE)
+ free_bootmem_node(pgdat, addr, last-addr);
+ }
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+unsigned long __init e820_end_of_ram(void)
+{
+ int i;
+ unsigned long end_pfn = 0;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = round_up(ei->addr, PAGE_SIZE);
+ end = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RAM) {
+ if (end > end_pfn<<PAGE_SHIFT)
+ end_pfn = end>>PAGE_SHIFT;
+ } else {
+ if (end > end_pfn_map<<PAGE_SHIFT)
+ end_pfn_map = end>>PAGE_SHIFT;
+ }
+ }
+
+ if (end_pfn > end_pfn_map)
+ end_pfn_map = end_pfn;
+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
+ end_pfn_map = MAXMEM>>PAGE_SHIFT;
+ if (end_pfn > end_user_pfn)
+ end_pfn = end_user_pfn;
+ if (end_pfn > end_pfn_map)
+ end_pfn = end_pfn_map;
+
+ return end_pfn;
+}
+
+/*
+ * Mark e820 reserved areas as busy for the resource manager.
+ */
+void __init e820_reserve_resources(void)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+ res = alloc_bootmem_low(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ }
+ }
+}
+
+
+void __init e820_print_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(" %s: %016Lx - %016Lx ", who,
+ (unsigned long long) e820.map[i].addr,
+ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
+ switch (e820.map[i].type) {
+ case E820_RAM: printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ printk("(ACPI NVS)\n");
+ break;
+ default: printk("type %u\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
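+ * The scheme: record a change-point at both the start and the end of
+ * every BIOS entry, sort the change-points by address, then sweep them
+ * in order while tracking which entries currently overlap; at each
+ * point the highest-numbered (most restrictive) type wins, and a new
+ * map entry is emitted whenever that winning type changes.
+ *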
+ */
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+ struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+ };
+ static struct change_member change_point_list[2*E820MAX] __initdata;
+ static struct change_member *change_point[2*E820MAX] __initdata;
+ static struct e820entry *overlap_list[E820MAX] __initdata;
+ static struct e820entry new_bios[E820MAX] __initdata;
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses) */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < 2*old_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+	/* loop through change-points, determining the effect on the new bios map */
+ for (chgidx=0; chgidx < 2*old_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+ if (overlap_list[i] == change_point[chgidx]->pbios)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+ break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it.
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect, and almost every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long start = biosmap->addr;
+ unsigned long size = biosmap->size;
+ unsigned long end = start + size;
+ unsigned long type = biosmap->type;
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ *
+	 * This should be removed on Hammer, which is supposed not to
+	 * have non-e820-covered ISA mappings there, but I had some
+	 * strange problems, so it stays for now. -AK
+ */
+ if (type == E820_RAM) {
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (start < 0xA0000ULL)
+ add_memory_region(start, 0xA0000ULL-start, type);
+ if (end <= 0x100000ULL)
+ continue;
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+}
+
+void __init setup_memory_region(void)
+{
+ char *who = "BIOS-e820";
+
+ /*
+ * Try to copy the BIOS-supplied E820-map.
+ *
+ * Otherwise fake a memory map; one section from 0k->640k,
+ * the next section from 1mb->appropriate_mem_k
+ */
+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
+ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
+ unsigned long mem_size;
+
+ /* compare results from other methods and take the greater */
+ if (ALT_MEM_K < EXT_MEM_K) {
+ mem_size = EXT_MEM_K;
+ who = "BIOS-88";
+ } else {
+ mem_size = ALT_MEM_K;
+ who = "BIOS-e801";
+ }
+
+ e820.nr_map = 0;
+ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+ }
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ e820_print_map(who);
+}
+#endif
+
+extern unsigned long xen_override_max_pfn;
+
+void __init parse_memopt(char *p, char **from)
+{
+ /*
+ * mem=XXX[kKmM] limits kernel memory to XXX+1MB
+ *
+ * It would be more logical to count from 0 instead of from
+ * HIGH_MEMORY, but we keep that for now for i386 compatibility.
+ *
+ * No support for custom mapping like i386. The reason is
+ * that we need to read the e820 map anyways to handle the
+ * ACPI mappings in the direct map. Also, on x86-64 there
+ * should always be a good e820 map. This is only an upper
+ * limit; you cannot force usage of memory not in e820.
+ *
+ * -AK
+ */
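+	/*
+	 * Worked example (assuming 4k pages and the usual 1MB
+	 * HIGH_MEMORY): "mem=512M" parses to 512 << 20, so
+	 * end_user_pfn = ((512 << 20) + (1 << 20)) >> 12 = 131328.
+	 */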
+ end_user_pfn = memparse(p, from) + HIGH_MEMORY;
+ end_user_pfn >>= PAGE_SHIFT;
+ xen_override_max_pfn = (unsigned long) end_user_pfn;
+}
+
--- /dev/null
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+/* Simple VGA output */
+
+#ifdef __i386__
+#define VGABASE (__ISA_IO_base + 0xb8000)
+#else
+#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
+#endif
+
+#define MAX_YPOS 25
+#define MAX_XPOS 80
+
+#if 0
+static int current_ypos = 1, current_xpos = 0;
+
+static void early_vga_write(struct console *con, const char *str, unsigned n)
+{
+ char c;
+ int i, k, j;
+
+ while ((c = *str++) != '\0' && n-- > 0) {
+ if (current_ypos >= MAX_YPOS) {
+ /* scroll 1 line up */
+ for (k = 1, j = 0; k < MAX_YPOS; k++, j++) {
+ for (i = 0; i < MAX_XPOS; i++) {
+ writew(readw(VGABASE + 2*(MAX_XPOS*k + i)),
+ VGABASE + 2*(MAX_XPOS*j + i));
+ }
+ }
+ for (i = 0; i < MAX_XPOS; i++)
+ writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
+ current_ypos = MAX_YPOS-1;
+ }
+ if (c == '\n') {
+ current_xpos = 0;
+ current_ypos++;
+ } else if (c != '\r') {
+ writew(((0x7 << 8) | (unsigned short) c),
+ VGABASE + 2*(MAX_XPOS*current_ypos +
+ current_xpos++));
+ if (current_xpos >= MAX_XPOS) {
+ current_xpos = 0;
+ current_ypos++;
+ }
+ }
+ }
+}
+
+static struct console early_vga_console = {
+ .name = "earlyvga",
+ .write = early_vga_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+#endif
+
+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
+
+int early_serial_base = 0x3f8; /* ttyS0 */
+
+#define XMTRDY 0x20
+
+#define DLAB 0x80
+
+#define TXR 0 /* Transmit register (WRITE) */
+#define RXR 0 /* Receive register (READ) */
+#define IER 1 /* Interrupt Enable */
+#define IIR 2 /* Interrupt ID */
+#define FCR 2 /* FIFO control */
+#define LCR 3 /* Line control */
+#define MCR 4 /* Modem control */
+#define LSR 5 /* Line Status */
+#define MSR 6 /* Modem Status */
+#define DLL 0 /* Divisor Latch Low */
+#define DLH 1 /* Divisor latch High */
+
+#if 0
+static int early_serial_putc(unsigned char ch)
+{
+ unsigned timeout = 0xffff;
+ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
+ cpu_relax();
+ outb(ch, early_serial_base + TXR);
+ return timeout ? 0 : -1;
+}
+
+static void early_serial_write(struct console *con, const char *s, unsigned n)
+{
+ while (*s && n-- > 0) {
+ early_serial_putc(*s);
+ if (*s == '\n')
+ early_serial_putc('\r');
+ s++;
+ }
+}
+#endif
+
+#define DEFAULT_BAUD 9600
+
+#if 0
+static __init void early_serial_init(char *s)
+{
+ unsigned char c;
+ unsigned divisor;
+ unsigned baud = DEFAULT_BAUD;
+ char *e;
+
+ if (*s == ',')
+ ++s;
+
+ if (*s) {
+ unsigned port;
+ if (!strncmp(s,"0x",2)) {
+ early_serial_base = simple_strtoul(s, &e, 16);
+ } else {
+ static int bases[] = { 0x3f8, 0x2f8 };
+
+ if (!strncmp(s,"ttyS",4))
+ s += 4;
+ port = simple_strtoul(s, &e, 10);
+ if (port > 1 || s == e)
+ port = 0;
+ early_serial_base = bases[port];
+ }
+ s += strcspn(s, ",");
+ if (*s == ',')
+ s++;
+ }
+
+ outb(0x3, early_serial_base + LCR); /* 8n1 */
+ outb(0, early_serial_base + IER); /* no interrupt */
+ outb(0, early_serial_base + FCR); /* no fifo */
+ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
+
+ if (*s) {
+ baud = simple_strtoul(s, &e, 0);
+ if (baud == 0 || s == e)
+ baud = DEFAULT_BAUD;
+ }
+
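+	/* Standard 16550 divisor latch: 115200 baud is the base rate
+	 * (1.8432 MHz UART clock / 16), so divisor 1 = 115200, 12 = 9600. */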
+ divisor = 115200 / baud;
+ c = inb(early_serial_base + LCR);
+ outb(c | DLAB, early_serial_base + LCR);
+ outb(divisor & 0xff, early_serial_base + DLL);
+ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
+ outb(c & ~DLAB, early_serial_base + LCR);
+}
+
+static struct console early_serial_console = {
+ .name = "earlyser",
+ .write = early_serial_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+#endif
+
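+/*
+ * Under Xen, early output does not touch VGA or a UART at all: it is
+ * handed to the hypervisor via the console_io hypercall and shows up
+ * on the Xen console.
+ */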
+static void xen_console_write(struct console *con, const char *s, unsigned n)
+{
+ HYPERVISOR_console_io(CONSOLEIO_write, n, (char *) s);
+}
+
+static struct console xen_console = {
+ .name = "xen",
+ .write = xen_console_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/* Direct interface for emergencies */
+struct console *early_console = &xen_console;
+/* struct console *early_console = &early_vga_console; */
+static int early_console_initialized = 0;
+
+void early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap,fmt);
+ n = vscnprintf(buf,512,fmt,ap);
+ early_console->write(early_console,buf,n);
+ va_end(ap);
+}
+
+static int keep_early;
+
+int __init setup_early_printk(char *opt)
+{
+
+ early_console = &xen_console;
+#if 0
+ if (early_console_initialized)
+ return -1;
+
+ opt = strchr(opt, '=') + 1;
+
+ strlcpy(buf,opt,sizeof(buf));
+ space = strchr(buf, ' ');
+ if (space)
+ *space = 0;
+
+ if (strstr(buf,"keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "serial", 6)) {
+ early_serial_init(buf + 6);
+ early_console = &early_serial_console;
+ } else if (!strncmp(buf, "ttyS", 4)) {
+ early_serial_init(buf);
+ early_console = &early_serial_console;
+ } else if (!strncmp(buf, "vga", 3)) {
+ early_console = &early_vga_console;
+ }
+#endif
+ early_console_initialized = 1;
+ register_console(early_console);
+ return 0;
+}
+
+void __init disable_early_printk(void)
+{
+ if (!early_console_initialized || !early_console)
+ return;
+ if (!keep_early) {
+ printk("disabling early console\n");
+ unregister_console(early_console);
+ early_console_initialized = 0;
+ } else {
+ printk("keeping early console\n");
+ }
+}
+
+__setup("earlyprintk=", setup_early_printk);
--- /dev/null
+/*
+ * linux/arch/x86_64/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ *
+ * $Id$
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Asit Mallick <asit.k.mallick@intel.com>
+ * Modified for Xen
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after an interrupt and after each system call.
+ *
+ * Normal syscalls and interrupts don't save a full stack frame; this is
+ * only done for syscall tracing, signals, or fork/exec et al.
+ *
+ * A note on terminology:
+ * - top of stack: Architecture defined interrupt frame from SS to RIP
+ * at the top of the kernel process stack.
+ * - partial stack frame: partially saved registers up to R11.
+ * - full stack frame: like the partial stack frame, but with all registers saved.
+ *
+ * TODO:
+ * - schedule it carefully for the final hardware.
+ */
+
+#define ASSEMBLY 1
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/dwarf2.h>
+#include <asm/calling.h>
+#include <asm/asm_offset.h>
+#include <asm/msr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/hw_irq.h>
+#include <asm/errno.h>
+#include <asm-xen/xen-public/arch-x86_64.h>
+
+
+EVENT_MASK = (CS+4)
+ECF_IN_SYSCALL = (1<<8)
+
+/*
+ * Copied from arch/xen/i386/kernel/entry.S
+ */
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending 0
+#define evtchn_upcall_mask 1
+
+#define sizeof_vcpu_shift 3
+
+#ifdef CONFIG_SMP
+#define XEN_GET_VCPU_INFO(reg)
+#define preempt_disable(reg) incl TI_preempt_count(reg)
+#define preempt_enable(reg) decl TI_preempt_count(reg)
+#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%rbp) ; \
+ movl TI_cpu(%rbp),reg ; \
+ shl $sizeof_vcpu_shift,reg ; \
+ addl HYPERVISOR_shared_info,reg
+#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%rbp)
+#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
+#define Ux00 0xff
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNBLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
+ movb $0,evtchn_upcall_mask(reg) ; \
+ XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp) ; \
+ XEN_LOCK_VCPU_INFO_SMP(reg) ; \
+ movb evtchn_upcall_mask(reg), tmp ; \
+ movb tmp, off(%rsp) ; \
+ XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#else
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_LOCK_VCPU_INFO_SMP(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
+#define Ux00 0x00
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg) XEN_LOCKED_BLOCK_EVENTS(reg)
+#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
+ movb evtchn_upcall_mask(reg), tmp; \
+ movb tmp, off(%rsp)
+#endif
+
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
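+
+/*
+ * The macros above are the paravirtual stand-ins for cli/sti: rather
+ * than toggling EFLAGS.IF, the guest sets or clears evtchn_upcall_mask
+ * in its shared-info vcpu area to block or permit event-channel
+ * upcalls (hence the commented-out sti/cli markers further down).
+ */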
+
+ .code64
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop XEN_BLOCK_EVENTS(%rsi)
+#else
+#define preempt_stop
+#define retint_kernel retint_restore_args
+#endif
+
+
+/*
+ * C code is not supposed to know about undefined top of stack. Every time
+ * a C function with a pt_regs argument is called from the SYSCALL-based
+ * fast path, FIXUP_TOP_OF_STACK is needed.
+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
+ * manipulation.
+ */
+
+
+ .macro FAKE_STACK_FRAME child_rip
+ /* push in order ss, rsp, eflags, cs, rip */
+ xorq %rax, %rax
+ pushq %rax /* ss */
+ CFI_ADJUST_CFA_OFFSET 8
+ pushq %rax /* rsp */
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_OFFSET rip,0
+ pushq $(1<<9) /* eflags - interrupts on */
+ CFI_ADJUST_CFA_OFFSET 8
+ pushq $__KERNEL_CS /* cs */
+ CFI_ADJUST_CFA_OFFSET 8
+ pushq \child_rip /* rip */
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_OFFSET rip,0
+ pushq %rax /* orig rax */
+ CFI_ADJUST_CFA_OFFSET 8
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+ addq $8*6, %rsp
+ CFI_ADJUST_CFA_OFFSET -(6*8)
+ .endm
+
+ .macro CFI_DEFAULT_STACK
+ CFI_ADJUST_CFA_OFFSET (SS)
+ CFI_OFFSET r15,R15-SS
+ CFI_OFFSET r14,R14-SS
+ CFI_OFFSET r13,R13-SS
+ CFI_OFFSET r12,R12-SS
+ CFI_OFFSET rbp,RBP-SS
+ CFI_OFFSET rbx,RBX-SS
+ CFI_OFFSET r11,R11-SS
+ CFI_OFFSET r10,R10-SS
+ CFI_OFFSET r9,R9-SS
+ CFI_OFFSET r8,R8-SS
+ CFI_OFFSET rax,RAX-SS
+ CFI_OFFSET rcx,RCX-SS
+ CFI_OFFSET rdx,RDX-SS
+ CFI_OFFSET rsi,RSI-SS
+ CFI_OFFSET rdi,RDI-SS
+ CFI_OFFSET rsp,RSP-SS
+ CFI_OFFSET rip,RIP-SS
+ .endm
+
+ /*
+ * Must be consistent with the definition in arch_x86_64.h:
+ * struct switch_to_user {
+ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+ * } PACKED;
+ * #define ECF_IN_SYSCALL (1<<8)
+ */
+ .macro SWITCH_TO_USER flag
+ movl $0,%gs:pda_kernel_mode # change to user mode
+ subq $8*4,%rsp # reuse rip, cs, rflags, rsp, ss in the stack
+ movq %rax,(%rsp)
+ movq %r11,1*8(%rsp)
+ movq %rcx,2*8(%rsp) # we saved %rcx upon exceptions
+ movq $\flag,3*8(%rsp)
+ movq $__USER_CS,5*8(%rsp)
+ movq $__USER_DS,8*8(%rsp)
+ movq $__HYPERVISOR_switch_to_user,%rax
+ syscall
+ .endm
+
+ .macro SWITCH_TO_KERNEL ssoff,adjust=0
+ btsq $0,%gs:pda_kernel_mode
+ jc 1f
+ orb $1,\ssoff-\adjust+4(%rsp)
+1:
+ .endm
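+
+/*
+ * Note: under x86-64 Xen the guest kernel itself runs in ring 3, so
+ * the CPL in the saved %cs cannot distinguish kernel from user
+ * context. pda_kernel_mode, together with a flag bit stashed in a
+ * spare byte of the saved %ss slot, tracks this instead (see
+ * int_ret_from_sys_call and retint_restore_args below).
+ */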
+
+/*
+ * A newly forked process directly context switches into this.
+ */
+/* rdi: prev */
+ENTRY(ret_from_fork)
+ CFI_STARTPROC
+ CFI_DEFAULT_STACK
+ call schedule_tail
+ GET_THREAD_INFO(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+ jnz rff_trace
+rff_action:
+ RESTORE_REST
+ cmpl $__KERNEL_CS,CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+ testl $_TIF_IA32,threadinfo_flags(%rcx)
+ jnz int_ret_from_sys_call
+ jmp ret_from_sys_call
+rff_trace:
+ movq %rsp,%rdi
+ call syscall_trace_leave
+ GET_THREAD_INFO(%rcx)
+ jmp rff_action
+ CFI_ENDPROC
+
+/*
+ * System call entry. Up to 6 arguments in registers are supported.
+ *
+ * SYSCALL does not save anything on the stack and does not change the
+ * stack pointer.
+ */
+
+/*
+ * Register setup:
+ * rax system call number
+ * rdi arg0
+ * rcx return address for syscall/sysret, C arg3
+ * rsi arg1
+ * rdx arg2
+ * r10 arg3 (--> moved to rcx for C)
+ * r8 arg4
+ * r9 arg5
+ * r11 eflags for syscall/sysret, temporary for C
+ * r12-r15,rbp,rbx saved by C code, not touched.
+ *
+ * Interrupts are off on entry.
+ * Only called from user space.
+ *
+ * XXX	if we had a free scratch register we could save the RSP into the stack frame
+ *      and report it properly in ps. Unfortunately we don't have one.
+ */
+
+ENTRY(system_call)
+ CFI_STARTPROC
+ SAVE_ARGS -8,0
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_SAVE_UPCALL_MASK(%r11,%cl,EVENT_MASK-ARGOFFSET) # saved %rcx
+ XEN_UNBLOCK_EVENTS(%r11)
+ GET_THREAD_INFO(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+ jnz tracesys
+ cmpq $__NR_syscall_max,%rax
+ ja badsys
+ movq %r10,%rcx
+ call *sys_call_table(,%rax,8) # XXX: rip relative
+ movq %rax,RAX-ARGOFFSET(%rsp)
+/*
+ * Syscall return path ending with SYSRET (fast path)
+ * Has incomplete stack frame and undefined top of stack.
+ */
+ .globl ret_from_sys_call
+ret_from_sys_call:
+ movl $_TIF_WORK_MASK,%edi
+ /* edi: flagmask */
+sysret_check:
+ GET_THREAD_INFO(%rcx)
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_BLOCK_EVENTS(%r11)
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz sysret_careful
+ XEN_UNBLOCK_EVENTS(%r11)
+ RESTORE_ARGS 0,8,0
+ SWITCH_TO_USER ECF_IN_SYSCALL
+
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+sysret_careful:
+ bt $TIF_NEED_RESCHED,%edx
+ jnc sysret_signal
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_BLOCK_EVENTS(%r11)
+ pushq %rdi
+ call schedule
+ popq %rdi
+ jmp sysret_check
+
+ /* Handle a signal */
+sysret_signal:
+/* sti */
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_UNBLOCK_EVENTS(%r11)
+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
+ jz 1f
+
+ /* Really a signal */
+ /* edx: work flags (arg3) */
+ leaq do_notify_resume(%rip),%rax
+ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
+ xorl %esi,%esi # oldset -> arg2
+ call ptregscall_common
+1: movl $_TIF_NEED_RESCHED,%edi
+ jmp sysret_check
+
+ /* Do syscall tracing */
+tracesys:
+ SAVE_REST
+ movq $-ENOSYS,RAX(%rsp)
+ movq %rsp,%rdi
+ call syscall_trace_enter
+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $__NR_syscall_max,%rax
+ ja 1f
+ movq %r10,%rcx /* fixup for C */
+ call *sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+1: SAVE_REST
+ movq %rsp,%rdi
+ call syscall_trace_leave
+ RESTORE_REST
+ jmp ret_from_sys_call
+
+badsys:
+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
+ jmp ret_from_sys_call
+
+/*
+ * Syscall return path ending with IRET.
+ * Has correct top of stack, but partial stack frame.
+ */
+ENTRY(int_ret_from_sys_call)
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_BLOCK_EVENTS(%r11)
+ testb $1,SS-ARGOFFSET+4(%rsp)
+ jnz 1f
+ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
+ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
+	jmp retint_restore_args		# return from ring-3 kernel
+1:
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+int_with_check:
+ GET_THREAD_INFO(%rcx)
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz int_careful
+ jmp retint_restore_args
+
+	/* Either a reschedule, a signal, or syscall-exit tracking is needed. */
+ /* First do a reschedule test. */
+ /* edx: work, edi: workmask */
+int_careful:
+ bt $TIF_NEED_RESCHED,%edx
+ jnc int_very_careful
+/* sti */
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_UNBLOCK_EVENTS(%r11)
+ pushq %rdi
+ call schedule
+ popq %rdi
+ jmp int_with_check
+
+ /* handle signals and tracing -- both require a full stack frame */
+int_very_careful:
+/* sti */
+ SAVE_REST
+ /* Check for syscall exit trace */
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
+ jz int_signal
+ pushq %rdi
+ leaq 8(%rsp),%rdi # &ptregs -> arg1
+ call syscall_trace_leave
+ popq %rdi
+ btr $TIF_SYSCALL_TRACE,%edi
+ btr $TIF_SYSCALL_AUDIT,%edi
+ btr $TIF_SINGLESTEP,%edi
+ jmp int_restore_rest
+
+int_signal:
+ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
+ jz 1f
+ movq %rsp,%rdi # &ptregs -> arg1
+ xorl %esi,%esi # oldset -> arg2
+ call do_notify_resume
+1: movl $_TIF_NEED_RESCHED,%edi
+int_restore_rest:
+ RESTORE_REST
+ jmp int_with_check
+ CFI_ENDPROC
+
+/*
+ * Certain special system calls need a complete, full stack frame to be saved.
+ */
+
+ .macro PTREGSCALL label,func,arg
+ .globl \label
+\label:
+ leaq \func(%rip),%rax
+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
+ jmp ptregscall_common
+ .endm
+
+ PTREGSCALL stub_clone, sys_clone, %r8
+ PTREGSCALL stub_fork, sys_fork, %rdi
+ PTREGSCALL stub_vfork, sys_vfork, %rdi
+ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
+ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
+ PTREGSCALL stub_iopl, sys_iopl, %rsi
+
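+/*
+ * Common tail for the stubs above: the return address is popped into
+ * %r11 and parked in callee-saved %r15 across the handler call, so
+ * SAVE_REST/RESTORE_REST can build and tear down a full pt_regs
+ * around it.
+ */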
+ENTRY(ptregscall_common)
+ CFI_STARTPROC
+ popq %r11
+ CFI_ADJUST_CFA_OFFSET -8
+ SAVE_REST
+ movq %r11, %r15
+ call *%rax
+ movq %r15, %r11
+ RESTORE_REST
+ pushq %r11
+ CFI_ADJUST_CFA_OFFSET 8
+ ret
+ CFI_ENDPROC
+
+ENTRY(stub_execve)
+ CFI_STARTPROC
+ popq %r11
+ CFI_ADJUST_CFA_OFFSET -8
+ SAVE_REST
+ movq %r11, %r15
+ call sys_execve
+ GET_THREAD_INFO(%rcx)
+ bt $TIF_IA32,threadinfo_flags(%rcx)
+ jc exec_32bit
+ movq %r15, %r11
+ RESTORE_REST
+ push %r11
+ ret
+
+exec_32bit:
+ CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rax,RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+
+/*
+ * sigreturn is special because it needs to restore all registers on return.
+ * This cannot be done with SYSRET, so use the IRET return path instead.
+ */
+ENTRY(stub_rt_sigreturn)
+ CFI_STARTPROC
+ addq $8, %rsp
+ SAVE_REST
+ movq %rsp,%rdi
+ call sys_rt_sigreturn
+ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+
+
+/*
+ * Interrupt entry/exit.
+ *
+ * Interrupt entry points save only callee-clobbered registers in the fast path.
+ *
+ * Entry runs with interrupts off.
+ */
+
+/* 0(%rsp): interrupt number */
+ .macro interrupt func
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,(SS-RDI)
+ CFI_REL_OFFSET rsp,(RSP-ORIG_RAX)
+ CFI_REL_OFFSET rip,(RIP-ORIG_RAX)
+ cld
+#ifdef CONFIG_DEBUG_INFO
+ SAVE_ALL
+ movq %rsp,%rdi
+ /*
+	 * Set up a stack frame pointer. This allows gdb to trace
+ * back to the original stack.
+ */
+ movq %rsp,%rbp
+ CFI_DEF_CFA_REGISTER rbp
+#else
+ SAVE_ARGS
+ leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
+#endif
+#if 0 /* For Xen we don't need to do this */
+ testl $3,CS(%rdi)
+ je 1f
+ swapgs
+#endif
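+	/*
+	 * pda_irqcount is initialised to -1 (see setup64.c), so the addl
+	 * sets ZF only for the outermost interrupt; the cmoveq then
+	 * switches %rsp to the per-cpu irq stack just for that first
+	 * nesting level.
+	 */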
+1: addl $1,%gs:pda_irqcount # RED-PEN should check preempt count
+ movq %gs:pda_irqstackptr,%rax
+ cmoveq %rax,%rsp
+ pushq %rdi # save old stack
+ call \func
+ .endm
+
+retint_check:
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz retint_careful
+retint_restore_args:
+ RESTORE_ARGS 0,8,0
+ testb $3,8(%rsp) # check CS
+ jnz user_mode
+kernel_mode:
+ orb $3,1*8(%rsp)
+ iretq
+user_mode:
+ SWITCH_TO_USER 0
+
+ /* edi: workmask, edx: work */
+retint_careful:
+ bt $TIF_NEED_RESCHED,%edx
+ jnc retint_signal
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_UNBLOCK_EVENTS(%r11)
+/* sti */
+ pushq %rdi
+ call schedule
+ popq %rdi
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_BLOCK_EVENTS(%r11)
+ GET_THREAD_INFO(%rcx)
+/* cli */
+ jmp retint_check
+
+retint_signal:
+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
+ jz retint_restore_args
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_UNBLOCK_EVENTS(%r11)
+ SAVE_REST
+ movq $-1,ORIG_RAX(%rsp)
+ xorq %rsi,%rsi # oldset
+ movq %rsp,%rdi # &pt_regs
+ call do_notify_resume
+ RESTORE_REST
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_BLOCK_EVENTS(%r11)
+ movl $_TIF_NEED_RESCHED,%edi
+ GET_THREAD_INFO(%rcx)
+ jmp retint_check
+
+#ifdef CONFIG_PREEMPT
+ /* Returning to kernel space. Check if we need preemption */
+ /* rcx: threadinfo. interrupts off. */
+ .p2align
+retint_kernel:
+ cmpl $0,threadinfo_preempt_count(%rcx)
+ jnz retint_restore_args
+ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
+ jnc retint_restore_args
+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ jc retint_restore_args
+ movl $PREEMPT_ACTIVE,threadinfo_preempt_count(%rcx)
+/* sti */
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_BLOCK_EVENTS(%r11)
+ call schedule
+ XEN_GET_VCPU_INFO(%r11) /* %esi can be different */
+ XEN_UNBLOCK_EVENTS(%r11)
+/* cli */
+ GET_THREAD_INFO(%rcx)
+ movl $0,threadinfo_preempt_count(%rcx)
+ jmp retint_kernel /* check again */
+#endif
+ CFI_ENDPROC
+
+/*
+ * APIC interrupts.
+ */
+ .macro apicinterrupt num,func
+ pushq $\num-256
+ interrupt \func
+ jmp ret_from_intr
+ CFI_ENDPROC
+ .endm
+
+#ifdef CONFIG_SMP
+ENTRY(reschedule_interrupt)
+ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
+
+ENTRY(invalidate_interrupt)
+ apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt
+
+ENTRY(call_function_interrupt)
+ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ENTRY(apic_timer_interrupt)
+ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
+
+ENTRY(error_interrupt)
+ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
+
+ENTRY(spurious_interrupt)
+ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
+#endif
+
+/*
+ * Exception entry points.
+ */
+ .macro zeroentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ pushq $0 /* push error code/oldrax */
+ pushq %rax /* push real oldrax to the rdi slot */
+ leaq \sym(%rip),%rax
+ jmp error_entry
+ .endm
+
+ .macro errorentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x18,%rsp /* rsp points to the error code */
+ pushq %rax
+ leaq \sym(%rip),%rax
+ jmp error_entry
+ .endm
+
+ /* error code is on the stack already */
+ /* handle NMI-like exceptions that can happen everywhere */
+ .macro paranoidentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ SAVE_ALL
+ cld
+ movl $1,%ebx
+ movl $MSR_GS_BASE,%ecx
+ rdmsr
+ testl %edx,%edx
+ js 1f
+/* swapgs */
+ xorl %ebx,%ebx
+1: movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi
+ movq $-1,ORIG_RAX(%rsp)
+ call \sym
+ .endm
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+ENTRY(error_entry)
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,(SS-RDI)
+ CFI_REL_OFFSET rsp,(RSP-RDI)
+ CFI_REL_OFFSET rip,(RIP-RDI)
+ /* rdi slot contains rax, oldrax contains error code */
+ cld
+ subq $14*8,%rsp
+ CFI_ADJUST_CFA_OFFSET (14*8)
+ movq %rsi,13*8(%rsp)
+ CFI_REL_OFFSET rsi,RSI
+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
+ movq %rdx,12*8(%rsp)
+ CFI_REL_OFFSET rdx,RDX
+ movq %rcx,11*8(%rsp)
+ CFI_REL_OFFSET rcx,RCX
+ movq %rsi,10*8(%rsp) /* store rax */
+ CFI_REL_OFFSET rax,RAX
+ movq %r8, 9*8(%rsp)
+ CFI_REL_OFFSET r8,R8
+ movq %r9, 8*8(%rsp)
+ CFI_REL_OFFSET r9,R9
+ movq %r10,7*8(%rsp)
+ CFI_REL_OFFSET r10,R10
+ movq %r11,6*8(%rsp)
+ CFI_REL_OFFSET r11,R11
+ movq %rbx,5*8(%rsp)
+ CFI_REL_OFFSET rbx,RBX
+ movq %rbp,4*8(%rsp)
+ CFI_REL_OFFSET rbp,RBP
+ movq %r12,3*8(%rsp)
+ CFI_REL_OFFSET r12,R12
+ movq %r13,2*8(%rsp)
+ CFI_REL_OFFSET r13,R13
+ movq %r14,1*8(%rsp)
+ CFI_REL_OFFSET r14,R14
+ movq %r15,(%rsp)
+ CFI_REL_OFFSET r15,R15
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+error_call_handler:
+ movq %rdi, RDI(%rsp)
+ movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi # get error code
+ movq $-1,ORIG_RAX(%rsp)
+ leaq do_hypervisor_callback,%rcx
+ cmpq %rax,%rcx
+ je 0f # don't save event mask for callbacks
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_SAVE_UPCALL_MASK(%r11,%cl,EVENT_MASK)
+0:
+ call *%rax
+error_check_event:
+ movb EVENT_MASK(%rsp), %al
+ notb %al # %al == ~saved_mask
+ XEN_LOCK_VCPU_INFO_SMP(%rsi)
+ andb evtchn_upcall_mask(%rsi),%al
+ andb $1,%al # %al == mask & ~saved_mask
+ jnz restore_all_enable_events # != 0 => reenable event delivery
+ XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
+error_exit:
+ RESTORE_REST
+/* cli */
+ GET_THREAD_INFO(%rcx)
+ testb $3,CS-REST_SKIP(%rsp)
+ jz retint_kernel
+ movl threadinfo_flags(%rcx),%edx
+ movl $_TIF_WORK_MASK,%edi
+ andl %edi,%edx
+ jnz retint_careful
+ RESTORE_ARGS 0,8,0
+ SWITCH_TO_USER 0
+ CFI_ENDPROC
+
+error_kernelspace:
+ /*
+ * We need to rewrite the logic here because we don't use iretq
+ * to return to user mode. It's still possible that we get a trap/fault
+ * in the kernel (when accessing buffers pointed to by system calls,
+ * for example).
+ */
+#if 0
+ incl %ebx
+ /* There are two places in the kernel that can potentially fault with
+ usergs. Handle them here. The exception handlers after
+ iret run with kernel gs again, so don't set the user space flag.
+ B stepping K8s sometimes report a truncated RIP for IRET
+ exceptions returning to compat mode. Check for these here too. */
+ leaq iret_label(%rip),%rbp
+ cmpq %rbp,RIP(%rsp)
+ je error_swapgs
+ movl %ebp,%ebp /* zero extend */
+ cmpq %rbp,RIP(%rsp)
+ je error_swapgs
+ cmpq $gs_change,RIP(%rsp)
+ je error_swapgs
+ jmp error_sti
+#endif
+
+ENTRY(hypervisor_callback)
+ zeroentry do_hypervisor_callback
+
+/*
+ * Copied from arch/xen/i386/kernel/entry.S
+ */
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
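+#
+# A C-like sketch (illustrative only, not assembled) of the check made
+# below, where rip is the RIP at the time of the upcall:
+#
+#	if (scrit <= rip && rip < ecrit)
+#		goto critical_region_fixup;	/* merge the two frames */
+#	saved_event_mask = 0;		/* the EVENT_MASK slot in the frame */
+#	evtchn_do_upcall(regs);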
+
+ENTRY(do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
+# see the correct pointer to the pt_regs.
+ addq $8, %rsp # we don't return, adjust the stack frame
+ movq RIP(%rsp),%rax
+ cmpq $scrit,%rax
+ jb 11f
+ cmpq $ecrit,%rax
+ jb critical_region_fixup
+11: movb $0, EVENT_MASK(%rsp)
+ call evtchn_do_upcall
+ jmp error_check_event
+
+ ALIGN
+restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
+scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%rsi)
+ jnz 14f # process more events if necessary...
+ XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
+ RESTORE_REST
+ jmp retint_restore_args
+
+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
+ XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
+ movq %rsp,%rdi # set the argument again
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
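+#
+# A C-like sketch (illustrative only, not assembled) of the merge below;
+# 'off' is the byte offset of the interrupted RIP into the critical region,
+# and new_top/old_top are illustrative names:
+#
+#	popped = critical_fixup_table[off];	/* bytes already popped */
+#	memmove(new_top, old_top, remaining);	/* pre-decrementing copy */
+#	rsp = new_top;				/* top of merged stack */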
+critical_region_fixup:
+ subq $scrit,%rax
+ shlq $1,%rax
+ addq $critical_fixup_table,%rax
+ movzwq (%rax),%rcx
+ xorq %rax,%rax
+ movb %ch,%al
+ movb $0,%ch
+#ifdef CONFIG_SMP
+ cmpb $0xff,%al
+ jne 15f
+ add $1,%al
+ GET_THREAD_INFO(%rbp)
+ XEN_UNLOCK_VCPU_INFO_SMP(%r11)
+15:
+#endif
+ movq %rsp,%rsi
+ movq %rsi,%rdi
+ addq $0xa8,%rax
+ addq %rax,%rdi
+ addq %rcx,%rsi
+ shrq $3,%rcx # convert bytes to 8-byte words
+ je 17f # skip loop if nothing to copy
+16: subq $8,%rsi # pre-decrementing copy loop
+ subq $8,%rdi
+ movq (%rsi),%rax
+ movq %rax,(%rdi)
+ loop 16b
+17: movq %rdi,%rsp # final %rdi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .word 0x0000,0x0000,0x0000,0x0000 # testb $0xff,0x0(%rsi)
+ .word 0x0000,0x0000 # jne ffffffff8010daa0 14f
+ .word 0x0000,0x0000,0x0000,0x0000 # mov (%rsp),%r15
+ .word 0x0808,0x0808,0x0808,0x0808,0x0808 # mov 0x8(%rsp),%r14
+ .word 0x1010,0x1010,0x1010,0x1010,0x1010 # mov 0x10(%rsp),%r13
+ .word 0x1818,0x1818,0x1818,0x1818,0x1818 # mov 0x18(%rsp),%r12
+ .word 0x2020,0x2020,0x2020,0x2020,0x2020 # mov 0x20(%rsp),%rbp
+ .word 0x2828,0x2828,0x2828,0x2828,0x2828 # mov 0x28(%rsp),%rbx
+ .word 0x3030,0x3030,0x3030,0x3030 # add $0x30,%rsp
+ .word 0x0030,0x0030,0x0030,0x0030,0x0030 # testb $0x1,0x74(%rsp)
+ .word 0x0030,0x0030,0x0030,0x0030,0x0030,0x0030 # jne ffffffff8010d740 <user_mode>
+ .word 0x0030,0x0030,0x0030,0x0030 # mov (%rsp),%r11
+ .word 0x0838,0x0838,0x0838,0x0838,0x0838 # mov 0x8(%rsp),%r10
+ .word 0x1040,0x1040,0x1040,0x1040,0x1040 # mov 0x10(%rsp),%r9
+ .word 0x1848,0x1848,0x1848,0x1848,0x1848 # mov 0x18(%rsp),%r8
+ .word 0x2060,0x2060,0x2060,0x2060,0x2060 # mov 0x20(%rsp),%rax
+ .word 0x2868,0x2868,0x2868,0x2868,0x2868 # mov 0x28(%rsp),%rcx
+ .word 0x3070,0x3070,0x3070,0x3070,0x3070 # mov 0x30(%rsp),%rdx
+ .word 0x3878,0x3878,0x3878,0x3878,0x3878 # mov 0x38(%rsp),%rsi
+ .word 0x4080,0x4080,0x4080,0x4080,0x4080 # mov 0x40(%rsp),%rdi
+ .word 0x4888,0x4888,0x4888,0x4888 # add $0x50,%rsp
+ .word 0x0000,0x0000 # iretq
+ .word 0x0000,0x0000,0x0000,0x0000 # movb $0x1,0x1(%rsi)
+ .word 0x0000,0x0000,0x0000 # mov %rsp,%rdi
+ .word 0x0000,0x0000,0x0000,0x0000,0x0000 # jmpq 11b
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+ hlt
+#if 0
+1: movl (%rsp),%ds
+2: movl 8(%rsp),%es
+3: movl 16(%rsp),%fs
+4: movl 24(%rsp),%gs
+ subq $14,%rsp
+ SAVE_ALL
+ jmp ret_from_exception
+.section .fixup,"ax"; \
+6: movq $0,(%rsp); \
+ jmp 1b; \
+7: movq $0,(%rsp); \
+ jmp 2b; \
+8: movq $0,(%rsp); \
+ jmp 3b; \
+9: movq $0,(%rsp); \
+ jmp 4b; \
+.previous; \
+.section __ex_table,"a";\
+ .align 8; \
+ .long 1b,6b; \
+ .long 2b,7b; \
+ .long 3b,8b; \
+ .long 4b,9b; \
+.previous
+
+ .section __ex_table,"a"
+ .align 8
+ .quad gs_change,bad_gs
+ .previous
+ .section .fixup,"ax"
+ /* running with kernelgs */
+bad_gs:
+/* swapgs */ /* switch back to user gs */
+ xorl %eax,%eax
+ movl %eax,%gs
+ jmp 2b
+ .previous
+#endif
+/*
+ * Create a kernel thread.
+ *
+ * C extern interface:
+ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ *
+ * asm input arguments:
+ * rdi: fn, rsi: arg, rdx: flags
+ */
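+/*
+ * Illustrative caller sketch (not part of this patch); thread_fn is a
+ * hypothetical worker function:
+ *
+ *	static int thread_fn(void *arg) { do_work(arg); return 0; }
+ *	...
+ *	pid = kernel_thread(thread_fn, arg, CLONE_FS | CLONE_FILES);
+ */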
+ENTRY(kernel_thread)
+ CFI_STARTPROC
+ FAKE_STACK_FRAME $child_rip
+ SAVE_ALL
+
+ # rdi: flags, rsi: usp, rdx: will be &pt_regs
+ movq %rdx,%rdi
+ orq kernel_thread_flags(%rip),%rdi
+ movq $-1, %rsi
+ movq %rsp, %rdx
+
+ xorl %r8d,%r8d
+ xorl %r9d,%r9d
+
+ # clone now
+ call do_fork
+ movq %rax,RAX(%rsp)
+ xorl %edi,%edi
+
+ /*
+ * It isn't worth checking for a reschedule here,
+ * so internally to the x86_64 port you can rely on kernel_thread()
+ * not to reschedule the child before returning; this avoids the need
+ * for hacks, for example to fork off the per-CPU idle tasks.
+ * [Hopefully no generic code relies on the reschedule -AK]
+ */
+ RESTORE_ALL
+ UNFAKE_STACK_FRAME
+ ret
+ CFI_ENDPROC
+
+
+child_rip:
+ /*
+ * Here we are in the child and the registers are set as they were
+ * at kernel_thread() invocation in the parent.
+ */
+ movq %rdi, %rax
+ movq %rsi, %rdi
+ call *%rax
+ # exit
+ xorq %rdi, %rdi
+ call do_exit
+
+/*
+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
+ *
+ * C extern interface:
+ * extern long execve(char *name, char **argv, char **envp)
+ *
+ * asm input arguments:
+ * rdi: name, rsi: argv, rdx: envp
+ *
+ * We want to fall back into:
+ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
+ *
+ * do_sys_execve asm fallback arguments:
+ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
+ */
+ENTRY(execve)
+ CFI_STARTPROC
+ FAKE_STACK_FRAME $0
+ SAVE_ALL
+ call sys_execve
+ movq %rax, RAX(%rsp)
+ RESTORE_REST
+ testq %rax,%rax
+ jne 1f
+ jmp int_ret_from_sys_call
+1: RESTORE_ARGS
+ UNFAKE_STACK_FRAME
+ ret
+ CFI_ENDPROC
+
+
+ /*
+ * A copy of error_entry, needed because of the different stack frame.
+ */
+ENTRY(page_fault)
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp # now %rsp points to %cr2
+ pushq %rax
+ leaq do_page_fault(%rip),%rax
+ cld
+ subq $13*8,%rsp
+ movq %rdx,12*8(%rsp) # save %rdx
+ movq 13*8(%rsp),%rdx # load rax
+ movq %rcx,11*8(%rsp)
+ movq %rdx,10*8(%rsp) # store rax
+ movq %rsi,13*8(%rsp) # now save %rsi
+ movq 14*8(%rsp),%rdx # load %cr2, 3rd argument
+ movq %r8, 9*8(%rsp)
+ movq %r9, 8*8(%rsp)
+ movq %r10,7*8(%rsp)
+ movq %r11,6*8(%rsp)
+ movq %rbx,5*8(%rsp)
+ movq %rbp,4*8(%rsp)
+ movq %r12,3*8(%rsp)
+ movq %r13,2*8(%rsp)
+ movq %r14,1*8(%rsp)
+ movq %r15,(%rsp)
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+ /*
+ * 1st and 2nd arguments are set by error_call_handler
+ */
+ jmp error_call_handler
+
+ENTRY(coprocessor_error)
+ zeroentry do_coprocessor_error
+
+ENTRY(simd_coprocessor_error)
+ zeroentry do_simd_coprocessor_error
+
+ENTRY(device_not_available)
+ zeroentry math_state_restore
+
+ /* runs on exception stack */
+ENTRY(debug)
+ CFI_STARTPROC
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_debug
+ /* switch back to process stack to restore the state ptrace touched */
+ movq %rax,%rsp
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+#if 0
+ /* runs on exception stack */
+ENTRY(nmi)
+ CFI_STARTPROC
+ pushq $-1
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_nmi
+ /* ebx: no swapgs flag */
+#endif
+paranoid_exit:
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+paranoid_swapgs:
+/* cli
+ swapgs */
+paranoid_restore:
+ RESTORE_ALL 8
+/* iretq */
+paranoid_userspace:
+/* cli */
+ GET_THREAD_INFO(%rcx)
+ movl threadinfo_flags(%rcx),%edx
+ testl $_TIF_NEED_RESCHED,%edx
+ jnz paranoid_resched
+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
+ jnz paranoid_signal
+ jmp paranoid_swapgs
+paranoid_resched:
+/* sti */
+ call schedule
+ jmp paranoid_exit
+paranoid_signal:
+/* sti */
+ xorl %esi,%esi /* oldset */
+ movq %rsp,%rdi /* &pt_regs */
+ call do_notify_resume
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+ENTRY(int3)
+ zeroentry do_int3
+
+ENTRY(overflow)
+ zeroentry do_overflow
+
+ENTRY(bounds)
+ zeroentry do_bounds
+
+ENTRY(invalid_op)
+ zeroentry do_invalid_op
+
+ENTRY(coprocessor_segment_overrun)
+ zeroentry do_coprocessor_segment_overrun
+
+ENTRY(reserved)
+ zeroentry do_reserved
+
+ /* runs on exception stack */
+ENTRY(double_fault)
+ CFI_STARTPROC
+ paranoidentry do_double_fault
+ movq %rax,%rsp
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+ENTRY(invalid_TSS)
+ errorentry do_invalid_TSS
+
+ENTRY(segment_not_present)
+ errorentry do_segment_not_present
+
+ /* runs on exception stack */
+ENTRY(stack_segment)
+ CFI_STARTPROC
+ paranoidentry do_stack_segment
+ movq %rax,%rsp
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+ENTRY(general_protection)
+ errorentry do_general_protection
+
+ENTRY(alignment_check)
+ errorentry do_alignment_check
+
+ENTRY(divide_error)
+ zeroentry do_divide_error
+
+ENTRY(spurious_interrupt_bug)
+ zeroentry do_spurious_interrupt_bug
+
+#ifdef CONFIG_X86_MCE
+ /* runs on exception stack */
+ENTRY(machine_check)
+ CFI_STARTPROC
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_machine_check
+ jmp paranoid_exit
+ CFI_ENDPROC
+#endif
+
+ENTRY(call_debug)
+ zeroentry do_call_debug
+
+
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
+ *
+ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ */
+
+
+#include <linux/linkage.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xffffffff80100000"
+ .ascii ",LOADER=generic"
+/* .ascii ",PT_MODE_WRITABLE" */
+ .byte 0
+
+
+#include <linux/threads.h>
+#include <asm/desc.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/msr.h>
+#include <asm/cache.h>
+/* #include <asm/thread_info.h> */
+
+
+/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
+ * because we need identity-mapped pages on setup, so define __START_KERNEL
+ * to 0x100000 for this stage.
+ */
+
+ .text
+ .code64
+ENTRY(_start)
+ cld
+ movq init_rsp(%rip),%rsp
+ /* Copy the necessary stuff from xen_start_info structure. */
+ movq $xen_start_info_union,%rdi
+ movq $64,%rcx /* sizeof (union xen_start_info_union) / sizeof (long) */
+ rep movsq
+
+#ifdef CONFIG_SMP
+ ENTRY(startup_64_smp)
+ cld
+#endif /* CONFIG_SMP */
+
+ /* zero EFLAGS after setting rsp */
+ pushq $0
+ popfq
+ movq initial_code(%rip),%rax
+ jmp *%rax
+
+ /* SMP bootup changes these two */
+ .globl initial_code
+initial_code:
+ .quad x86_64_start_kernel
+ .globl init_rsp
+init_rsp:
+ .quad init_thread_union+THREAD_SIZE-8
+
+ENTRY(early_idt_handler)
+ xorl %eax,%eax
+ movq 8(%rsp),%rsi # get rip
+ movq (%rsp),%rdx
+ leaq early_idt_msg(%rip),%rdi
+1: hlt # generate #GP
+ jmp 1b
+
+early_idt_msg:
+ .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
+
+#if 0
+ENTRY(lgdt_finish)
+ movl $(__USER_DS),%eax # DS/ES contains default USER segment
+ movw %ax,%ds
+ movw %ax,%es
+ movl $(__KERNEL_DS),%eax
+ movw %ax,%ss # after changing gdt.
+ popq %rax # get the return address
+ pushq $(__KERNEL_CS)
+ pushq %rax
+ lretq
+#endif
+
+ENTRY(stext)
+ENTRY(_stext)
+
+ /*
+ * This default setting generates an identity mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+ * 0xffffffff80000000 to physical address 0x000000 (always using
+ * 2MB large pages provided by PAE mode).
+ */
+.org 0x1000
+ENTRY(init_level4_pgt)
+ .fill 512,8,0
+
+ /*
+ * We update two pgd entries to make the kernel and user pgds consistent
+ * at pgd_populate(). This can be relied on by kernel modules, so we place
+ * this page here for those cases to avoid memory corruption.
+ * We also use this page to establish the initial mapping for the
+ * vsyscall area.
+ */
+.org 0x2000
+ENTRY(init_level4_user_pgt)
+ .fill 512,8,0
+
+ /*
+ * This is used for vsyscall area mapping as we have a different
+ * level4 page table for user.
+ */
+.org 0x3000
+ENTRY(level3_user_pgt)
+ .fill 512,8,0
+
+.org 0x4000
+ENTRY(cpu_gdt_table)
+/* The TLS descriptors are currently at a different place compared to i386.
+ Hopefully nobody expects them at a fixed place (Wine?) */
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x008ffa000000ffff /* __KERNEL_COMPAT32_CS */
+ .quad 0x00affa000000ffff /* __KERNEL_CS */
+ .quad 0x00cff2000000ffff /* __KERNEL_DS */
+
+ .quad 0x00cffa000000ffff /* __USER32_CS */
+ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
+ .quad 0x00affa000000ffff /* __USER_CS */
+ .quad 0x00cffa000000ffff /* __KERNEL32_CS */
+ .quad 0,0 /* TSS */
+ .quad 0 /* LDT */
+ .quad 0,0,0 /* three TLS descriptors */
+ .quad 0 /* unused now */
+
+gdt_end:
+ /* asm/segment.h:GDT_ENTRIES must match this */
+ /* This should be a multiple of the cache line size */
+ /* GDTs of other CPUs: */
+ .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
+
+.org 0x5000
+ENTRY(empty_zero_page)
+
+.org 0x6000
+ENTRY(empty_bad_page)
+
+.org 0x7000
+ENTRY(empty_bad_pte_table)
+
+.org 0x8000
+ENTRY(empty_bad_pmd_table)
+
+ .org 0x9000
+#ifdef CONFIG_ACPI_SLEEP
+ENTRY(wakeup_level4_pgt)
+ .quad 0x0000000000102007 /* -> level3_ident_pgt */
+ .fill 255,8,0
+ .quad 0x000000000010a007
+ .fill 254,8,0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad 0x0000000000103007 /* -> level3_kernel_pgt */
+#endif
+
+ .data
+
+ .align 16
+ .globl cpu_gdt_descr
+cpu_gdt_descr:
+ .word gdt_end-cpu_gdt_table
+gdt:
+ .quad cpu_gdt_table
+#ifdef CONFIG_SMP
+ .rept NR_CPUS-1
+ .word 0
+ .quad 0
+ .endr
+#endif
+
+ENTRY(gdt_table32)
+ .quad 0x0000000000000000 /* This one is magic */
+ .quad 0x0000000000000000 /* unused */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+gdt32_end:
+
+/* We need valid kernel segments for data and code in long mode too:
+ * IRET will check the segment types (kkeil 2000/10/28).
+ * Also, sysret mandates a special GDT layout.
+ */
+
+#if 0
+.align L1_CACHE_BYTES
+#endif
+ .align L1_CACHE_BYTES
+ENTRY(idt_table)
+ .rept 256
+ .quad 0
+ .quad 0
+ .endr
+
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#include <asm/proto.h>
+#include <asm/smp.h>
+#include <asm/bootsetup.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+
+unsigned long start_pfn;
+
+/* Don't add a printk in here: printk relies on the PDA, which is not
+ initialized yet. */
+#if 0
+static void __init clear_bss(void)
+{
+ extern char __bss_start[], __bss_end[];
+ memset(__bss_start, 0,
+ (unsigned long) __bss_end - (unsigned long) __bss_start);
+}
+#endif
+
+extern char x86_boot_params[2048];
+
+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
+#define OLD_CL_MAGIC_ADDR 0x90020
+#define OLD_CL_MAGIC 0xA33F
+#define OLD_CL_BASE_ADDR 0x90000
+#define OLD_CL_OFFSET 0x90022
+
+extern char saved_command_line[];
+
+#if 0
+static void __init copy_bootdata(char *real_mode_data)
+{
+ int new_data;
+ char * command_line;
+
+ memcpy(x86_boot_params, real_mode_data, 2048);
+ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
+ if (!new_data) {
+ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
+ printk("so old bootloader that it does not support commandline?!\n");
+ return;
+ }
+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
+ printk("old bootloader convention, maybe loadlin?\n");
+ }
+ command_line = (char *) ((u64)(new_data));
+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ printk("Bootdata ok (command line is %s)\n", saved_command_line);
+}
+#endif
+
+static void __init setup_boot_cpu_data(void)
+{
+ int dummy, eax;
+
+ /* get vendor info */
+ cpuid(0, &boot_cpu_data.cpuid_level,
+ (int *)&boot_cpu_data.x86_vendor_id[0],
+ (int *)&boot_cpu_data.x86_vendor_id[8],
+ (int *)&boot_cpu_data.x86_vendor_id[4]);
+
+ /* get cpu type */
+ cpuid(1, &eax, &dummy, &dummy,
+ (unsigned int *) &boot_cpu_data.x86_capability);
+ boot_cpu_data.x86 = (eax >> 8) & 0xf;
+ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
+ boot_cpu_data.x86_mask = eax & 0xf;
+}
+
+extern char _end[];
+
+void __init x86_64_start_kernel(char * real_mode_data)
+{
+ int i;
+
+ phys_to_machine_mapping = (u32 *)xen_start_info.mfn_list;
+ start_pfn = (__pa(xen_start_info.pt_base) >> PAGE_SHIFT) + xen_start_info.nr_pt_frames;
+
+ for (i = 0; i < 256; i++)
+ set_intr_gate(i, early_idt_handler);
+#if 0
+ asm volatile("lidt %0" :: "m" (idt_descr));
+#endif
+ pda_init(0);
+ /* copy_bootdata(real_mode_data); */
+#ifdef CONFIG_SMP
+ cpu_set(0, cpu_online_map);
+#endif
+ /* default console: */
+ if (!strstr(saved_command_line, "console="))
+ strcat(saved_command_line, " console=tty0");
+#if 0
+ s = strstr(saved_command_line, "earlyprintk=");
+ if (s != NULL)
+ setup_early_printk(s);
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ {
+ char *s = strstr(saved_command_line, "numa=");
+ if (s != NULL)
+ numa_setup(s+5);
+ }
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ if (strstr(saved_command_line, "disableapic"))
+ disable_apic = 1;
+#endif
+ /* You need early console to see that */
+ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
+ panic("Kernel too big for kernel mapping\n");
+
+ setup_boot_cpu_data();
+ start_kernel();
+}
--- /dev/null
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp;
+
+#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/ioport.c
+ *
+ * This contains the io-permission bitmap code - written by obz, with changes
+ * by Linus.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/thread_info.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+
+/*
+ * sys_iopl has to be used when you want to access the IO ports
+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+ * you'd need 8kB of bitmaps per process, which is a bit excessive.
+ */
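+
+/*
+ * Illustrative user-space usage sketch (not part of this file),
+ * assuming the glibc <sys/io.h> wrappers:
+ *
+ *	if (iopl(3) == 0)
+ *		outb(0, 0x80);	value first, then port; allowed once IOPL is raised
+ */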
+
+// asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
+asmlinkage long sys_iopl(unsigned int new_io_pl)
+{
+ unsigned int old_io_pl = current->thread.io_pl;
+ dom0_op_t op;
+
+ if (new_io_pl > 3)
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (new_io_pl > old_io_pl) {
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ }
+
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ return -EPERM;
+
+ /* Maintain OS privileges even if user attempts to relinquish them. */
+ if (new_io_pl == 0)
+ new_io_pl = 1;
+
+ /* Change our version of the privilege levels. */
+ current->thread.io_pl = new_io_pl;
+
+ /* Force the change at ring 0. */
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = new_io_pl;
+ HYPERVISOR_dom0_op(&op);
+
+ return 0;
+}
+
+/*
+ * this changes the io permissions bitmap in the current task.
+ */
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+ return turn_on ? sys_iopl(3) : 0;
+}
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the lowest level x86_64-specific interrupt
+ * entry and irq statistics code. All the remaining irq logic is
+ * done by the generic kernel/irq/ code and in the
+ * x86_64-specific irq controller code. (e.g. i8259.c and
+ * io_apic.c.)
+ */
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+/*
+ * Interrupt statistics:
+ */
+
+atomic_t irq_err_count;
+
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_printf(p, " ");
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ",
+ kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+
+ seq_printf(p, " %s", action->name);
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_printf(p, "NMI: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+ seq_putc(p, '\n');
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "LOC: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+#endif
+ }
+ return 0;
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+{
+ /* high bits used in ret_from_ code */
+ int irq = regs->orig_rax & __IRQ_MASK(HARDIRQ_BITS);
+
+ irq_enter();
+
+ __do_IRQ(irq, regs);
+ irq_exit();
+
+ return 1;
+}
+
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2002 Andi Kleen
+ *
+ * This handles calls from both 32bit and 64bit mode.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+static void flush_ldt(void *null)
+{
+ if (current->active_mm) {
+ load_LDT(&current->active_mm->context);
+ flush_page_update_queue();
+ }
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
+{
+ void *oldldt;
+ void *newldt;
+ unsigned oldsize;
+
+ if (mincount <= (unsigned)pc->size)
+ return 0;
+ oldsize = pc->size;
+ mincount = (mincount+511)&(~511);
+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+ else
+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+
+ if (!newldt)
+ return -ENOMEM;
+
+ if (oldsize)
+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+ oldldt = pc->ldt;
+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+ wmb();
+ pc->ldt = newldt;
+ wmb();
+ pc->size = mincount;
+ wmb();
+ if (reload) {
+#ifdef CONFIG_SMP
+ cpumask_t mask;
+
+ preempt_disable();
+#endif
+ make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ load_LDT(pc);
+ flush_page_update_queue();
+#ifdef CONFIG_SMP
+ mask = cpumask_of_cpu(smp_processor_id());
+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+ smp_call_function(flush_ldt, NULL, 1, 1);
+ preempt_enable();
+#else
+ load_LDT(pc);
+#endif
+ }
+ if (oldsize) {
+ make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ flush_page_update_queue();
+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(oldldt);
+ else
+ kfree(oldldt);
+ }
+ return 0;
+}
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+ int err = alloc_ldt(new, old->size, 0);
+ if (err < 0)
+ return err;
+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+ make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ flush_page_update_queue();
+ return 0;
+}
+
+/*
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ struct mm_struct * old_mm;
+ int retval = 0;
+
+ init_MUTEX(&mm->context.sem);
+ mm->context.size = 0;
+ old_mm = current->mm;
+ if (old_mm && old_mm->context.size > 0) {
+ down(&old_mm->context.sem);
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ up(&old_mm->context.sem);
+ }
+ return retval;
+}
+
+/*
+ * Don't touch the LDT register - we're already in the next thread.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context.size) {
+ if (mm == current->active_mm)
+ clear_LDT();
+ make_pages_writable(mm->context.ldt,
+ (mm->context.size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ flush_page_update_queue();
+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(mm->context.ldt);
+ else
+ kfree(mm->context.ldt);
+ mm->context.size = 0;
+ }
+}
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+ int err;
+ unsigned long size;
+ struct mm_struct * mm = current->mm;
+
+ if (!mm->context.size)
+ return 0;
+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+ down(&mm->context.sem);
+ size = mm->context.size*LDT_ENTRY_SIZE;
+ if (size > bytecount)
+ size = bytecount;
+
+ err = 0;
+ if (copy_to_user(ptr, mm->context.ldt, size))
+ err = -EFAULT;
+ up(&mm->context.sem);
+ if (err < 0)
+ goto error_return;
+ if (size != bytecount) {
+ /* zero-fill the rest */
+ if (clear_user(ptr+size, bytecount-size) != 0) {
+ err = -EFAULT;
+ goto error_return;
+ }
+ }
+ return bytecount;
+error_return:
+ return err;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+ /* Arbitrary number */
+ /* x86-64 default LDT is all zeros */
+ if (bytecount > 128)
+ bytecount = 128;
+ if (clear_user(ptr, bytecount))
+ return -EFAULT;
+ return bytecount;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
+{
+ struct task_struct *me = current;
+ struct mm_struct * mm = me->mm;
+ unsigned long entry = 0, *lp;
+ unsigned long mach_lp;
+ int error;
+ struct user_desc ldt_info;
+
+ error = -EINVAL;
+
+ if (bytecount != sizeof(ldt_info))
+ goto out;
+ error = -EFAULT;
+ if (copy_from_user(&ldt_info, ptr, bytecount))
+ goto out;
+
+ error = -EINVAL;
+ if (ldt_info.entry_number >= LDT_ENTRIES)
+ goto out;
+ if (ldt_info.contents == 3) {
+ if (oldmode)
+ goto out;
+ if (ldt_info.seg_not_present == 0)
+ goto out;
+ }
+
+ down(&mm->context.sem);
+ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+ if (error < 0)
+ goto out_unlock;
+ }
+
+ lp = (unsigned long *)((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+ mach_lp = arbitrary_virt_to_machine(lp);
+
+ /* Allow LDTs to be cleared by the user. */
+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+ if (oldmode || LDT_empty(&ldt_info)) {
+ entry = 0;
+ goto install;
+ }
+ }
+
+#if 0
+ entry = LDT_entry(&ldt_info);
+#endif
+ if (oldmode)
+ entry &= ~(1 << 20);
+
+ /* Install the new entry ... */
+install:
+ error = HYPERVISOR_update_descriptor(mach_lp, entry);
+
+out_unlock:
+ up(&mm->context.sem);
+out:
+ return error;
+}
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+ int ret = -ENOSYS;
+
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
+ break;
+ case 1:
+ ret = write_ldt(ptr, bytecount, 1);
+ break;
+ case 2:
+ ret = read_default_ldt(ptr, bytecount);
+ break;
+ case 0x11:
+ ret = write_ldt(ptr, bytecount, 0);
+ break;
+ }
+ return ret;
+}
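+
+/*
+ * Illustrative user-space usage sketch (not part of this file), writing
+ * one LDT entry via the raw syscall; base and the chosen flags are
+ * assumptions for the example:
+ *
+ *	struct user_desc ud = {
+ *		.entry_number = 0, .base_addr = base,
+ *		.limit = 0xfffff, .seg_32bit = 1, .limit_in_pages = 1,
+ *	};
+ *	syscall(SYS_modify_ldt, 1, &ud, sizeof(ud));	func 1 == write_ldt
+ */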
--- /dev/null
+/*
+ * Dynamic DMA mapping support.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm-xen/balloon.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pte_offset_kernel pte_offset
+#define pud_t pgd_t
+#define pud_offset(d, va) d
+#endif
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
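+/*
+ * Typical driver usage sketch (illustrative only; program_hw is a
+ * hypothetical device-specific helper):
+ *
+ *	int i, n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
+ *	for (i = 0; i < n; i++)
+ *		program_hw(sg_dma_address(&sglist[i]),
+ *			   sg_dma_length(&sglist[i]));
+ *	...
+ *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
+ */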
+int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nents; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ s->dma_length = s->length;
+ }
+ return nents;
+}
+
+EXPORT_SYMBOL(dma_map_sg);
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int dir)
+{
+ int i;
+ for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(s->page == NULL);
+ BUG_ON(s->dma_address == 0);
+ dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ }
+}
+
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+static void
+xen_contig_memory(unsigned long vstart, unsigned int order)
+{
+ /*
+ * Ensure multi-page extents are contiguous in machine memory.
+ * This code could be cleaned up some, and the number of
+ * hypercalls reduced.
+ */
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long pfn, i, flags;
+
+ scrub_pages(vstart, 1 << order);
+
+ balloon_lock(flags);
+
+ /* 1. Zap current PTEs, giving away the underlying pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn = pte->pte >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ (u32)INVALID_P2M_ENTRY;
+ flush_page_update_queue();
+ if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &pfn, 1, 0) != 1) BUG();
+ }
+ /* 2. Get a new contiguous memory extent. */
+ if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ &pfn, 1, order) != 1) BUG();
+ /* 3. Map the new extent in place of old pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ queue_l1_entry_update(
+ pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
+ queue_machphys_update(
+ pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ pfn+i;
+ }
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ balloon_unlock(flags);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+#else
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, unsigned gfp)
+#endif
+{
+ void *ret;
+ unsigned int order = get_order(size);
+ unsigned long vstart;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ int gfp = GFP_ATOMIC;
+
+ if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+#else
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (mem) {
+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
+ order);
+ if (page >= 0) {
+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+ ret = mem->virt_base + (page << PAGE_SHIFT);
+ memset(ret, 0, size);
+ return ret;
+ }
+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+ return NULL;
+ }
+
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+#endif
+
+ vstart = __get_free_pages(gfp, order);
+ ret = (void *)vstart;
+ if (ret == NULL)
+ return ret;
+
+ xen_contig_memory(vstart, order);
+
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#else
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ int order = get_order(size);
+
+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+ bitmap_release_region(mem->bitmap, page, order);
+ } else
+ free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+#if 0
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ void __iomem *mem_base;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = (pages + 31)/32;
+
+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+ goto out;
+ if (!size)
+ goto out;
+ if (dev->dma_mem)
+ goto out;
+
+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+ mem_base = ioremap(bus_addr, size);
+ if (!mem_base)
+ goto out;
+
+ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto free1_out;
+ memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+
+ if (flags & DMA_MEMORY_MAP)
+ return DMA_MEMORY_MAP;
+
+ return DMA_MEMORY_IO;
+
+ free1_out:
+ kfree(dev->dma_mem->bitmap);
+ out:
+ return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+
+ if(!mem)
+ return;
+ dev->dma_mem = NULL;
+ iounmap(mem->virt_base);
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int pos, err;
+
+ if (!mem)
+ return ERR_PTR(-EINVAL);
+
+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+ if (err != 0)
+ return ERR_PTR(err);
+ return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+#endif
+#endif
--- /dev/null
+/* Fallback functions when the main IOMMU code is not compiled in. This
+ code is roughly equivalent to i386. */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <asm/proto.h>
+#include <asm/processor.h>
+
+int iommu_merge = 0;
+EXPORT_SYMBOL(iommu_merge);
+
+dma_addr_t bad_dma_address;
+EXPORT_SYMBOL(bad_dma_address);
+
+int iommu_bio_merge = 0;
+EXPORT_SYMBOL(iommu_bio_merge);
+
+int iommu_sac_force = 0;
+EXPORT_SYMBOL(iommu_sac_force);
+
+#if 0
+/*
+ * Dummy IO MMU functions
+ */
+
+void *dma_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned gfp)
+{
+ void *ret;
+ u64 mask;
+ int order = get_order(size);
+
+ if (hwdev)
+ mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
+ else
+ mask = 0xffffffff;
+ for (;;) {
+ ret = (void *)__get_free_pages(gfp, order);
+ if (ret == NULL)
+ return NULL;
+ *dma_handle = virt_to_bus(ret);
+ if ((*dma_handle & ~mask) == 0)
+ break;
+ free_pages((unsigned long)ret, order);
+ if (gfp & GFP_DMA)
+ return NULL;
+ gfp |= GFP_DMA;
+ }
+
+ memset(ret, 0, size);
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
+#endif
+
+int dma_supported(struct device *hwdev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ * RED-PEN this won't work for pci_map_single. Caller has to
+ * use GFP_DMA in the first place.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(dma_supported);
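+
+/*
+ * Illustrative use (not part of this file): a driver verifying that a
+ * 24-bit DMA mask can be honoured before committing to it:
+ *
+ *	if (!dma_supported(dev, 0x00ffffffULL))
+ *		return -EIO;
+ */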
+
+int dma_get_cache_alignment(void)
+{
+ return boot_cpu_data.x86_clflush_size;
+}
+EXPORT_SYMBOL(dma_get_cache_alignment);
+
+static int __init check_ram(void)
+{
+ if (end_pfn >= 0xffffffff>>PAGE_SHIFT) {
+ printk(
+ KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n");
+ }
+ return 0;
+}
+__initcall(check_ram);
+
--- /dev/null
+/*
+ * linux/arch/x86-64/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * X86-64 port
+ * Andi Kleen.
+ *
+ * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elfcore.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/module.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/ptrace.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/mmu_context.h>
+#include <asm/pda.h>
+#include <asm/prctl.h>
+#include <asm/kdebug.h>
+#include <asm-xen/multicall.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/hardirq.h>
+#include <asm/ia32.h>
+
+asmlinkage extern void ret_from_fork(void);
+
+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
+
+atomic_t hlt_counter = ATOMIC_INIT(0);
+
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Power management idle function, if any.
+ */
+void (*pm_idle)(void);
+static cpumask_t cpu_idle_map;
+
+void disable_hlt(void)
+{
+ atomic_inc(&hlt_counter);
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+ atomic_dec(&hlt_counter);
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
+extern int set_timeout_timer(void);
+void xen_idle(void)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, 0);
+
+ if (need_resched()) {
+ local_irq_enable();
+ } else if (set_timeout_timer() == 0) {
+ /* NB. Blocking reenable events in a race-free manner. */
+ HYPERVISOR_block();
+ } else {
+ local_irq_enable();
+ HYPERVISOR_yield();
+ }
+ set_need_resched();
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (i.e. sit in a loop waiting for
+ * somebody to say that they'd like to reschedule).
+ */
+void cpu_idle (void)
+{
+ int cpu = smp_processor_id();
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched()) {
+ if (cpu_isset(cpu, cpu_idle_map))
+ cpu_clear(cpu, cpu_idle_map);
+ rmb();
+
+ __IRQ_STAT(cpu,idle_timestamp) = jiffies;
+ xen_idle();
+ }
+ schedule();
+ }
+}
+
+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
+/* Always use xen_idle() instead. */
+void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
+
+/* Prints also some state that isn't saved in the pt_regs */
+void __show_regs(struct pt_regs * regs)
+{
+ unsigned long fs, gs, shadowgs;
+ unsigned int fsindex,gsindex;
+ unsigned int ds,cs,es;
+
+ printk("\n");
+ print_modules();
+ printk("Pid: %d, comm: %.20s %s %s\n",
+ current->pid, current->comm, print_tainted(), system_utsname.release);
+ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
+ printk_address(regs->rip);
+ printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
+ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+ regs->rax, regs->rbx, regs->rcx);
+ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+ regs->rdx, regs->rsi, regs->rdi);
+ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+ regs->rbp, regs->r8, regs->r9);
+ printk("R10: %016lx R11: %016lx R12: %016lx\n",
+ regs->r10, regs->r11, regs->r12);
+ printk("R13: %016lx R14: %016lx R15: %016lx\n",
+ regs->r13, regs->r14, regs->r15);
+
+ asm("movl %%ds,%0" : "=r" (ds));
+ asm("movl %%cs,%0" : "=r" (cs));
+ asm("movl %%es,%0" : "=r" (es));
+ asm("movl %%fs,%0" : "=r" (fsindex));
+ asm("movl %%gs,%0" : "=r" (gsindex));
+
+ rdmsrl(MSR_FS_BASE, fs);
+ rdmsrl(MSR_GS_BASE, gs);
+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+ fs,fsindex,gs,gsindex,shadowgs);
+ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
+
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ show_trace(&regs->rsp);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ struct task_struct *me = current;
+ struct thread_struct *t = &me->thread;
+ if (me->thread.io_bitmap_ptr) {
+ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+ kfree(t->io_bitmap_ptr);
+ t->io_bitmap_ptr = NULL;
+ /*
+ * Careful, clear this in the TSS too:
+ */
+ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+ t->io_bitmap_max = 0;
+ put_cpu();
+ }
+}
+
+void load_gs_index(unsigned gs)
+{
+ __load_gs_index(gs);
+}
+
+void flush_thread(void)
+{
+ struct task_struct *tsk = current;
+ struct thread_info *t = current_thread_info();
+
+ if (t->flags & _TIF_ABI_PENDING)
+ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
+
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+ tsk->thread.debugreg2 = 0;
+ tsk->thread.debugreg3 = 0;
+ tsk->thread.debugreg6 = 0;
+ tsk->thread.debugreg7 = 0;
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+ * Forget coprocessor state..
+ */
+ clear_fpu(tsk);
+ clear_used_math();
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ if (dead_task->mm) {
+ if (dead_task->mm->context.size) {
+ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+ dead_task->mm->context.size);
+ BUG();
+ }
+ }
+}
+
+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
+{
+ struct user_desc ud = {
+ .base_addr = addr,
+ .limit = 0xfffff,
+ .seg_32bit = 1,
+ .limit_in_pages = 1,
+ .useable = 1,
+ };
+ struct n_desc_struct *desc = (void *)t->thread.tls_array;
+ desc += tls;
+ desc->a = LDT_entry_a(&ud);
+ desc->b = LDT_entry_b(&ud);
+}
+
+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
+{
+ struct desc_struct *desc = (void *)t->thread.tls_array;
+ desc += tls;
+ return desc->base0 |
+ (((u32)desc->base1) << 16) |
+ (((u32)desc->base2) << 24);
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ unlazy_fpu(tsk);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ int err;
+ struct pt_regs * childregs;
+ struct task_struct *me = current;
+
+ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+
+ *childregs = *regs;
+
+ childregs->rax = 0;
+ childregs->rsp = rsp;
+ if (rsp == ~0UL) {
+ childregs->rsp = (unsigned long)childregs;
+ }
+
+ p->thread.rsp = (unsigned long) childregs;
+ p->thread.rsp0 = (unsigned long) (childregs+1);
+ p->thread.userrsp = me->thread.userrsp;
+
+ set_ti_thread_flag(p->thread_info, TIF_FORK);
+
+ p->thread.fs = me->thread.fs;
+ p->thread.gs = me->thread.gs;
+
+ asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
+ asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
+ asm("movl %%es,%0" : "=m" (p->thread.es));
+ asm("movl %%ds,%0" : "=m" (p->thread.ds));
+
+ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!p->thread.io_bitmap_ptr) {
+ p->thread.io_bitmap_max = 0;
+ return -ENOMEM;
+ }
+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
+ }
+
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS) {
+#ifdef CONFIG_IA32_EMULATION
+ if (test_thread_flag(TIF_IA32))
+ err = ia32_child_tls(p, childregs);
+ else
+#endif
+ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+ if (err)
+ goto out;
+ }
+ p->thread.io_pl = current->thread.io_pl;
+
+ err = 0;
+out:
+ if (err && p->thread.io_bitmap_ptr) {
+ kfree(p->thread.io_bitmap_ptr);
+ p->thread.io_bitmap_max = 0;
+ }
+ return err;
+}
+
+/*
+ * This special macro can be used to load a debugging register
+ */
+#define loaddebug(thread,register) \
+ HYPERVISOR_set_debugreg((register), \
+ (thread->debugreg ## register))
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ * This could still be optimized:
+ * - fold all the options into a flag word and test it with a single test.
+ * - could test fs/gs bitsliced
+ */
+struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ dom0_op_t op;
+
+ /* NB. No need to disable interrupts as already done in sched.c */
+ /* __cli(); */
+
+// MULTICALL_flush_page_update_queue();
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+ /*
+ * This is basically '__unlazy_fpu', except that we queue a
+ * multicall to indicate FPU task switch, rather than
+ * synchronously trapping to Xen.
+ */
+ if (prev_p->thread_info->status & TS_USEDFPU) {
+ save_init_fpu(prev_p);
+ queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+ }
+
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ */
+ tss->rsp0 = next->rsp0;
+// queue_multicall1(__HYPERVISOR_stack_switch, tss->rsp0);
+ HYPERVISOR_stack_switch(__KERNEL_DS, tss->rsp0);
+
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ * This is load_TLS(next, cpu) with multicalls.
+ */
+#define C(i) do { \
+ if (unlikely(next->tls_array[i] != prev->tls_array[i])) \
+ queue_multicall2(__HYPERVISOR_update_descriptor, \
+ virt_to_machine(&get_cpu_gdt_table(cpu) \
+ [GDT_ENTRY_TLS_MIN + i]), \
+ (unsigned long) &next->tls_array[i]); \
+} while (0)
+ C(0); C(1); C(2);
+#undef C
+
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = next->io_pl;
+ op.interface_version = DOM0_INTERFACE_VERSION;
+ HYPERVISOR_dom0_op(&op);
+#if 0
+ queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+#endif
+ }
+
+ /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
+// execute_multicall_list();
+ /* __sti(); */
+
+ /*
+ * Switch DS and ES.
+ * This won't pick up thread selector changes, but I guess that is ok.
+ */
+ asm volatile("movl %%es,%0" : "=m" (prev->es));
+ if (unlikely(next->es | prev->es))
+ loadsegment(es, next->es);
+
+ asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
+ /*
+ * Switch FS and GS.
+ */
+ {
+ unsigned fsindex;
+ asm volatile("movl %%fs,%0" : "=g" (fsindex));
+ /* A segment register != 0 always requires a reload.
+ Also reload when it has changed.
+ When the prev process used a 64bit base, always reload
+ to avoid an information leak. */
+ if (unlikely(fsindex | next->fsindex | prev->fs)) {
+ loadsegment(fs, next->fsindex);
+ /* check if the user used a selector != 0
+ * if yes clear 64bit base, since overloaded base
+ * is always mapped to the Null selector
+ */
+ if (fsindex)
+ prev->fs = 0;
+ }
+ /* when next process has a 64bit base use it */
+ if (next->fs) {
+ HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
+ }
+ prev->fsindex = fsindex;
+ }
+ {
+ unsigned gsindex;
+ asm volatile("movl %%gs,%0" : "=g" (gsindex));
+ if (unlikely(gsindex | next->gsindex | prev->gs)) {
+ load_gs_index(next->gsindex);
+ if (gsindex)
+ prev->gs = 0;
+ }
+ if (next->gs)
+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
+ prev->gsindex = gsindex;
+ }
+
+ /*
+ * Switch the PDA context.
+ */
+ prev->userrsp = read_pda(oldrsp);
+ write_pda(oldrsp, next->userrsp);
+ write_pda(pcurrent, next_p);
+ write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
+
+ /*
+ * Now maybe reload the debug registers
+ */
+ if (unlikely(next->debugreg7)) {
+ loaddebug(next, 0);
+ loaddebug(next, 1);
+ loaddebug(next, 2);
+ loaddebug(next, 3);
+ /* no 4 and 5 */
+ loaddebug(next, 6);
+ loaddebug(next, 7);
+ }
+
+ /*
+ * Handle the IO bitmap
+ */
+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
+ if (next->io_bitmap_ptr)
+ /*
+ * Copy the relevant range of the IO bitmap.
+ * Normally this is 128 bytes or less:
+ */
+ memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+ max(prev->io_bitmap_max, next->io_bitmap_max));
+ else {
+ /*
+ * Clear any possible leftover bits:
+ */
+ memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+ }
+ }
+
+ return prev_p;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage
+long sys_execve(char __user *name, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs regs)
+{
+ long error;
+ char * filename;
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ return error;
+ error = do_execve(filename, argv, envp, &regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+ putname(filename);
+ return error;
+}
+
+void set_personality_64bit(void)
+{
+ /* inherit personality from parent */
+
+ /* Make sure to be in 64bit mode */
+ clear_thread_flag(TIF_IA32);
+
+ /* TBD: overwrites user setup. Should have two bits.
+ But 64bit processes have always behaved this way,
+ so it's not too bad. The main problem is just that
+ 32bit childs are affected again. */
+ current->personality &= ~READ_IMPLIES_EXEC;
+}
+
+asmlinkage long sys_fork(struct pt_regs *regs)
+{
+ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
+}
+
+asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->rsp;
+ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage long sys_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
+ NULL, NULL);
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long stack;
+ u64 fp,rip;
+ int count = 0;
+
+ if (!p || p == current || p->state==TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)p->thread_info;
+ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
+ return 0;
+ fp = *(u64 *)(p->thread.rsp);
+ do {
+ if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
+ return 0;
+ rip = *(u64 *)(fp+8);
+ if (!in_sched_functions(rip))
+ return rip;
+ fp = *(u64 *)fp;
+ } while (count++ < 16);
+ return 0;
+}
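+/*
+ * N.B. get_wchan() walks the sleeping task's frame-pointer chain: the
+ * saved %rbp sits at thread.rsp, each return address at fp + 8, and the
+ * walk stops at the first caller outside the scheduler, on leaving the
+ * stack, or after 16 frames.
+ */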
+
+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+{
+ int ret = 0;
+ int doit = task == current;
+ int cpu;
+
+ switch (code) {
+ case ARCH_SET_GS:
+ if (addr >= TASK_SIZE)
+ return -EPERM;
+ cpu = get_cpu();
+ /* handle small bases via the GDT because that's faster to
+ switch. */
+ if (addr <= 0xffffffff) {
+ set_32bit_tls(task, GS_TLS, addr);
+ if (doit) {
+ load_TLS(&task->thread, cpu);
+ load_gs_index(GS_TLS_SEL);
+ }
+ task->thread.gsindex = GS_TLS_SEL;
+ task->thread.gs = 0;
+ } else {
+ task->thread.gsindex = 0;
+ task->thread.gs = addr;
+ if (doit) {
+ load_gs_index(0);
+ ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
+ printk("do_arch_prctl: SET_GS: addr = %lx\n", addr);
+ }
+ }
+ put_cpu();
+ break;
+ case ARCH_SET_FS:
+ /* Not strictly needed for fs, but do it for symmetry
+ with gs */
+ if (addr >= TASK_SIZE)
+ return -EPERM;
+ cpu = get_cpu();
+ /* handle small bases via the GDT because that's faster to
+ switch. */
+ if (addr <= 0xffffffff) {
+ set_32bit_tls(task, FS_TLS, addr);
+ if (doit) {
+ load_TLS(&task->thread, cpu);
+ asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
+ }
+ task->thread.fsindex = FS_TLS_SEL;
+ task->thread.fs = 0;
+ } else {
+ task->thread.fsindex = 0;
+ task->thread.fs = addr;
+ if (doit) {
+ /* set the selector to 0 to not confuse
+ __switch_to */
+ asm volatile("movl %0,%%fs" :: "r" (0));
+ ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);
+
+ }
+ }
+ put_cpu();
+ break;
+ case ARCH_GET_FS: {
+ unsigned long base;
+ if (task->thread.fsindex == FS_TLS_SEL)
+ base = read_32bit_tls(task, FS_TLS);
+ else if (doit) {
+ rdmsrl(MSR_FS_BASE, base);
+ printk("do_arch_prctl: GET_FS: addr = %lx\n", addr);
+ } else
+ base = task->thread.fs;
+ ret = put_user(base, (unsigned long __user *)addr);
+ break;
+ }
+ case ARCH_GET_GS: {
+ unsigned long base;
+ if (task->thread.gsindex == GS_TLS_SEL)
+ base = read_32bit_tls(task, GS_TLS);
+ else if (doit) {
+ rdmsrl(MSR_KERNEL_GS_BASE, base);
+ printk("do_arch_prctl: GET_GS: addr = %lx\n", addr);
+ } else
+ base = task->thread.gs;
+ ret = put_user(base, (unsigned long __user *)addr);
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+long sys_arch_prctl(int code, unsigned long addr)
+{
+ return do_arch_prctl(current, code, addr);
+}
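+/*
+ * Reached from userspace via the arch_prctl(2) syscall; an illustrative
+ * (not part of this patch) call to set a thread's FS base would be:
+ *
+ *   syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_area);
+ *
+ * Bases below 4GB go through a GDT TLS slot; larger ones through
+ * HYPERVISOR_set_segment_base, as handled in do_arch_prctl() above.
+ */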
+
+/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+ struct pt_regs *pp, ptregs;
+
+ pp = (struct pt_regs *)(tsk->thread.rsp0);
+ --pp;
+
+ ptregs = *pp;
+ ptregs.cs &= 0xffff;
+ ptregs.ss &= 0xffff;
+
+ elf_core_copy_regs(regs, &ptregs);
+
+ return 1;
+}
--- /dev/null
+/*
+ * linux/arch/x86-64/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Nov 2001 Dave Jones <davej@suse.de>
+ * Forked from i386 setup code.
+ *
+ * $Id$
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/kallsyms.h>
+#include <linux/edd.h>
+#include <linux/percpu.h>
+#include <asm/mtrr.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/msr.h>
+#include <asm/desc.h>
+#include <video/edid.h>
+#include <asm/e820.h>
+#include <asm/dma.h>
+#include <asm/mpspec.h>
+#include <asm/mmu_context.h>
+#include <asm/bootsetup.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/mach_apic.h>
+#include <asm/numa.h>
+
+#include "setup_arch_pre.h"
+#include <asm-xen/hypervisor.h>
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
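+/*
+ * With 4K pages (PAGE_SHIFT == 12): PFN_UP(0x1234) == 2, since partial
+ * frames round up, and PFN_PHYS(2) == 0x2000.
+ */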
+
+#include <asm/mach-xen/setup_arch_post.h>
+
+extern unsigned long start_pfn;
+
+#if 0
+struct edid_info {
+ unsigned char dummy[128];
+};
+#endif
+
+extern struct edid_info edid_info;
+
+/* Allows setting of maximum possible memory size */
+unsigned long xen_override_max_pfn;
+/*
+ * Machine setup..
+ */
+
+struct cpuinfo_x86 boot_cpu_data;
+
+
+unsigned long mmu_cr4_features;
+EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+int acpi_disabled;
+EXPORT_SYMBOL(acpi_disabled);
+#ifdef CONFIG_ACPI_BOOT
+extern int __initdata acpi_ht;
+extern acpi_interrupt_flags acpi_sci_flags;
+int __initdata acpi_force = 0;
+#endif
+
+int acpi_numa __initdata;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
+int bootloader_type;
+
+unsigned long saved_video_mode;
+
+#ifdef CONFIG_SWIOTLB
+int swiotlb;
+EXPORT_SYMBOL(swiotlb);
+#endif
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct sys_desc_table_struct {
+ unsigned short length;
+ unsigned char table[0];
+};
+
+struct edid_info edid_info;
+struct e820map e820;
+
+unsigned char aux_device_present;
+
+extern int root_mountflags;
+extern char _text, _etext, _edata, _end;
+
+char command_line[COMMAND_LINE_SIZE];
+
+struct resource standard_io_resources[] = {
+ { .name = "dma1", .start = 0x00, .end = 0x1f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "pic1", .start = 0x20, .end = 0x21,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer0", .start = 0x40, .end = 0x43,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer1", .start = 0x50, .end = 0x53,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "keyboard", .start = 0x60, .end = 0x6f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "pic2", .start = 0xa0, .end = 0xa1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma2", .start = 0xc0, .end = 0xdf,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "fpu", .start = 0xf0, .end = 0xff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
+};
+
+#define STANDARD_IO_RESOURCES \
+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
+
+struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
+struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
+
+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource adapter_rom_resources[] = {
+ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM }
+};
+#endif
+
+#define ADAPTER_ROM_RESOURCES \
+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_RAM,
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+}
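+/*
+ * Per the BIOS expansion-ROM convention the bytes of a valid ROM image
+ * sum to zero modulo 256, so a non-zero sum marks the image as bogus.
+ */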
+
+static void __init probe_roms(void)
+{
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+ request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+}
+#endif
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
+u32 *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
+DEFINE_PER_CPU(int, nr_multicall_ents);
+
+/* Raw start-of-day parameters from the hypervisor. */
+union xen_start_info_union xen_start_info_union;
+
+static __init void parse_cmdline_early (char ** cmdline_p)
+{
+ char c = ' ', *to = command_line, *from = COMMAND_LINE;
+ int len = 0;
+
+ memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ if (c != ' ')
+ goto next_char;
+
+#ifdef CONFIG_SMP
+ /*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ */
+ else if (!memcmp(from, "maxcpus=", 8)) {
+ extern unsigned int maxcpus;
+
+ maxcpus = simple_strtoul(from + 8, NULL, 0);
+ }
+#endif
+#ifdef CONFIG_ACPI_BOOT
+ /* "acpi=off" disables both ACPI table parsing and interpreter init */
+ if (!memcmp(from, "acpi=off", 8))
+ disable_acpi();
+
+ if (!memcmp(from, "acpi=force", 10)) {
+ /* add later when we do DMI horrors: */
+ acpi_force = 1;
+ acpi_disabled = 0;
+ }
+
+ /* acpi=ht just means: do ACPI MADT parsing
+ at bootup, but don't enable the full ACPI interpreter */
+ if (!memcmp(from, "acpi=ht", 7)) {
+ if (!acpi_force)
+ disable_acpi();
+ acpi_ht = 1;
+ }
+ else if (!memcmp(from, "pci=noacpi", 10))
+ acpi_disable_pci();
+ else if (!memcmp(from, "acpi=noirq", 10))
+ acpi_noirq_set();
+
+ else if (!memcmp(from, "acpi_sci=edge", 13))
+ acpi_sci_flags.trigger = 1;
+ else if (!memcmp(from, "acpi_sci=level", 14))
+ acpi_sci_flags.trigger = 3;
+ else if (!memcmp(from, "acpi_sci=high", 13))
+ acpi_sci_flags.polarity = 1;
+ else if (!memcmp(from, "acpi_sci=low", 12))
+ acpi_sci_flags.polarity = 3;
+
+ /* acpi=strict disables out-of-spec workarounds */
+ else if (!memcmp(from, "acpi=strict", 11)) {
+ acpi_strict = 1;
+ }
+#endif
+
+#if 0
+ if (!memcmp(from, "nolapic", 7) ||
+ !memcmp(from, "disableapic", 11))
+ disable_apic = 1;
+
+ if (!memcmp(from, "noapic", 6))
+ skip_ioapic_setup = 1;
+
+ if (!memcmp(from, "apic", 4)) {
+ skip_ioapic_setup = 0;
+ ioapic_force = 1;
+ }
+#endif
+
+ if (!memcmp(from, "mem=", 4))
+ parse_memopt(from+4, &from);
+
+#ifdef CONFIG_DISCONTIGMEM
+ if (!memcmp(from, "numa=", 5))
+ numa_setup(from+5);
+#endif
+
+#ifdef CONFIG_GART_IOMMU
+ if (!memcmp(from,"iommu=",6)) {
+ iommu_setup(from+6);
+ }
+#endif
+
+ if (!memcmp(from,"oops=panic", 10))
+ panic_on_oops = 1;
+
+ if (!memcmp(from, "noexec=", 7))
+ nonx_setup(from + 7);
+
+ next_char:
+ c = *(from++);
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init contig_initmem_init(void)
+{
+ unsigned long bootmap_size, bootmap;
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+
+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+ bootmap = start_pfn;
+ bootmap_size = init_bootmem(bootmap, end_pfn);
+ reserve_bootmem(bootmap, bootmap_size);
+
+ free_bootmem(start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
+ printk("Registering memory for bootmem: from %lx, size = %lx\n",
+ start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
+ /*
+ * This should cover kernel_end
+ */
+#if 0
+ reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+#endif
+ reserve_bootmem(0, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1));
+
+}
+#endif
+
+/* Use inline assembly to define this because the nops are defined
+ as inline assembly strings in the include files and we cannot
+ get them easily into strings. */
+asm("\t.data\nk8nops: "
+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+ K8_NOP7 K8_NOP8);
+
+extern unsigned char k8nops[];
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k8nops,
+ k8nops + 1,
+ k8nops + 1 + 2,
+ k8nops + 1 + 2 + 3,
+ k8nops + 1 + 2 + 3 + 4,
+ k8nops + 1 + 2 + 3 + 4 + 5,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
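+/*
+ * k8_nops[k] points at a k-byte NOP: the variants are laid down back to
+ * back in k8nops, so the k-byte one starts at offset 1 + 2 + ... + (k-1),
+ * e.g. k8_nops[4] == k8nops + 1 + 2 + 3.
+ */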
+
+/* Replace instructions with better alternatives for this CPU type.
+
+ This runs before SMP is initialized to avoid SMP problems with
+ self-modifying code. This implies that asymmetric systems where
+ APs have fewer capabilities than the boot processor are not handled.
+ In this case boot with "noreplacement". */
+void apply_alternatives(void *start, void *end)
+{
+ struct alt_instr *a;
+ int diff, i, k;
+ for (a = start; (void *)a < end; a++) {
+ if (!boot_cpu_has(a->cpuid))
+ continue;
+
+ BUG_ON(a->replacementlen > a->instrlen);
+ __inline_memcpy(a->instr, a->replacement, a->replacementlen);
+ diff = a->instrlen - a->replacementlen;
+
+ /* Pad the rest with nops */
+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+ k = diff;
+ if (k > ASM_NOP_MAX)
+ k = ASM_NOP_MAX;
+ __inline_memcpy(a->instr + i, k8_nops[k], k);
+ }
+ }
+}
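+/*
+ * E.g. a 7-byte instruction replaced by a 5-byte alternative leaves
+ * diff == 2, which the padding loop above fills with one 2-byte NOP;
+ * gaps wider than ASM_NOP_MAX are chopped into ASM_NOP_MAX-byte NOPs.
+ */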
+
+static int no_replacement __initdata = 0;
+
+void __init alternative_instructions(void)
+{
+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+ if (no_replacement)
+ return;
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
+
+static int __init noreplacement_setup(char *s)
+{
+ no_replacement = 1;
+ return 0;
+}
+
+__setup("noreplacement", noreplacement_setup);
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+struct edd edd;
+#ifdef CONFIG_EDD_MODULE
+EXPORT_SYMBOL(edd);
+#endif
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+}
+#else
+static inline void copy_edd(void)
+{
+}
+#endif
+
+#if 0
+#define EBDA_ADDR_POINTER 0x40E
+static void __init reserve_ebda_region(void)
+{
+ unsigned int addr;
+ /**
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E
+ */
+ addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
+ addr <<= 4;
+ if (addr)
+ reserve_bootmem_generic(addr, PAGE_SIZE);
+}
+#endif
+
+/*
+ * Guest physical address space starts at 0.
+ */
+
+unsigned long __init xen_end_of_ram(void)
+{
+ unsigned long max_end_pfn = xen_start_info.nr_pages;
+
+ if ( xen_override_max_pfn < max_end_pfn)
+ xen_override_max_pfn = max_end_pfn;
+
+ return xen_override_max_pfn;
+}
+
+static void __init print_memory_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ early_printk(" %s: %016Lx - %016Lx ", who,
+ e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ switch (e820.map[i].type) {
+ case E820_RAM: early_printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ early_printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ early_printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ early_printk("(ACPI NVS)\n");
+ break;
+ default: early_printk("type %u\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ unsigned long low_mem_size;
+ int i,j;
+#if 0
+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+#else
+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
+#endif
+ drive_info = DRIVE_INFO;
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ screen_info = SCREEN_INFO;
+#endif
+ edid_info = EDID_INFO;
+ aux_device_present = AUX_DEVICE_INFO;
+ saved_video_mode = SAVED_VIDEO_MODE;
+ bootloader_type = LOADER_TYPE;
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+/* register_console(&xen_console); */
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* This is drawn from a dump from vgacon:startup in standard Linux. */
+ screen_info.orig_video_mode = 3;
+ screen_info.orig_video_isVGA = 1;
+ screen_info.orig_video_lines = 25;
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_ega_bx = 3;
+ screen_info.orig_video_points = 16;
+#endif
+ ARCH_SETUP
+ print_memory_map(machine_specific_memory_setup());
+
+ /* copy_edd(); */
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) &_text;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+/* init_mm.brk = (unsigned long) &_end; */
+ init_mm.brk = start_pfn << PAGE_SHIFT;
+
+
+#if 0 /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
+ code_resource.start = virt_to_phys(&_text);
+ code_resource.end = virt_to_phys(&_etext)-1;
+ data_resource.start = virt_to_phys(&_etext);
+ data_resource.end = virt_to_phys(&_edata)-1;
+#endif
+ parse_cmdline_early(cmdline_p);
+
+ early_identify_cpu(&boot_cpu_data);
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+#if 0
+ end_pfn = e820_end_of_ram();
+#else
+ end_pfn = xen_end_of_ram();
+#endif
+
+ check_efer();
+
+ init_memory_mapping(0, (end_pfn << PAGE_SHIFT));
+
+#ifdef CONFIG_ACPI_BOOT
+ /*
+ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
+ * Call this early for SRAT node setup.
+ */
+ acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+ /*
+ * Parse SRAT to discover nodes.
+ */
+ acpi_numa_init();
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ numa_initmem_init(0, end_pfn);
+#else
+ contig_initmem_init();
+#endif
+
+ /* Reserve direct mapping and shared info etc. */
+// reserve_bootmem_generic(table_start << PAGE_SHIFT, (table_end + 1 - table_start) << PAGE_SHIFT);
+
+// reserve_bootmem_generic(0, (table_end + 1) << PAGE_SHIFT);
+
+ /* reserve kernel */
+// kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
+
+#if 0
+ /*
+ * reserve physical page 0 - it's a special BIOS page on many boxes,
+ * enabling clean reboots, SMP operation, laptop functions.
+ */
+ reserve_bootmem_generic(0, PAGE_SIZE);
+#endif
+
+ /* reserve ebda region */
+/* reserve_ebda_region(); */
+
+#ifdef CONFIG_SMP
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
+
+ /* Reserve SMP trampoline */
+ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
+#endif
+
+#ifdef CONFIG_ACPI_SLEEP
+ /*
+ * Reserve low memory region for sleep support.
+ */
+ acpi_reserve_bootmem();
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (xen_start_info.mod_start) {
+ if (LOADER_TYPE && INITRD_START) {
+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
+ /* reserve_bootmem_generic(INITRD_START, INITRD_SIZE); */
+ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ initrd_below_start_ok = 1;
+ }
+ else {
+ printk(KERN_ERR "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ (unsigned long)(INITRD_START + INITRD_SIZE),
+ (unsigned long)(end_pfn << PAGE_SHIFT));
+ initrd_start = 0;
+ }
+ }
+ }
+#endif
+ paging_init();
+
+ /* Make sure we have a large enough P->M table. */
+ if (end_pfn > xen_start_info.nr_pages) {
+ phys_to_machine_mapping = alloc_bootmem(
+ max_pfn * sizeof(unsigned long));
+ memset(phys_to_machine_mapping, ~0,
+ max_pfn * sizeof(unsigned long));
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(unsigned long));
+ free_bootmem(
+ __pa(xen_start_info.mfn_list),
+ PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
+ sizeof(unsigned long))));
+ }
+
+ pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);
+
+ for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
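+ /*
+ * Each entry records the machine frame backing one page of the P->M
+ * table (PAGE_SIZE/sizeof(unsigned long) entries per page, matching
+ * the allocation above); presumably the tools walk this list to
+ * locate the table when building or saving the domain.
+ */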
+
+#if 0
+ check_ioapic();
+#endif
+
+#ifdef CONFIG_ACPI_BOOT
+ /*
+ * Read APIC and some other early information from ACPI tables.
+ */
+ acpi_boot_init();
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * get boot-time SMP configuration:
+ */
+ if (smp_found_config)
+ get_smp_config();
+ init_apic_mappings();
+#endif
+
+ /* XXX Disable irqdebug until we have a way to avoid interrupt
+ * conflicts. */
+/* noirqdebug_setup(""); */
+ /* If we are a privileged guest OS then we should request IO privs. */
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ dom0_op_t op;
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = 3;
+ if (HYPERVISOR_dom0_op(&op) != 0)
+ panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
+ current->thread.io_pl = 3;
+ }
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+ probe_roms();
+#endif
+/* e820_reserve_resources(); */
+
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ {
+ unsigned i;
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+ }
+
+ /* Will likely break when you have unassigned resources with more
+ than 4GB memory and bridges that don't support more than 4GB.
+ Doing it properly would require using pci_alloc_consistent
+ in this case. */
+ low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
+ if (low_mem_size > pci_mem_start)
+ pci_mem_start = low_mem_size;
+
+#ifdef CONFIG_GART_IOMMU
+ iommu_hole_init();
+#endif
+
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ panic("Xen granted us console access "
+ "but not privileged status");
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ } else {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ extern const struct consw xennull_con;
+ extern int console_use_vt;
+#if defined(CONFIG_VGA_CONSOLE)
+ /* disable VGA driver */
+ ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
+#endif
+ conswitchp = &xennull_con;
+ console_use_vt = 0;
+#endif
+ }
+}
+
+static int __init get_model_name(struct cpuinfo_x86 *c)
+{
+ unsigned int *v;
+
+ if (c->x86_cpuid_level < 0x80000004)
+ return 0;
+
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+ return 1;
+}
+
+
+static void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+ unsigned int n, dummy, eax, ebx, ecx, edx;
+
+ n = c->x86_cpuid_level;
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ /* On K8 L1 TLB is inclusive, so don't count it */
+ c->x86_tlbsize = 0;
+ }
+
+ if (n >= 0x80000006) {
+ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+ ecx = cpuid_ecx(0x80000006);
+ c->x86_cache_size = ecx >> 16;
+ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+
+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ c->x86_cache_size, ecx & 0xFF);
+ }
+
+ if (n >= 0x80000007)
+ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
+ if (n >= 0x80000008) {
+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ }
+}
+
+
+static int __init init_amd(struct cpuinfo_x86 *c)
+{
+ int r;
+ int level;
+#ifdef CONFIG_NUMA
+ int cpu;
+#endif
+
+ /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+ clear_bit(0*32+31, &c->x86_capability);
+
+ /* C-stepping K8? */
+ level = cpuid_eax(1);
+ if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+ set_bit(X86_FEATURE_K8_C, &c->x86_capability);
+
+ r = get_model_name(c);
+ if (!r) {
+ switch (c->x86) {
+ case 15:
+ /* Should distinguish Models here, but this is only
+ a fallback anyways. */
+ strcpy(c->x86_model_id, "Hammer");
+ break;
+ }
+ }
+ display_cacheinfo(c);
+
+ if (c->x86_cpuid_level >= 0x80000008) {
+ c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+ if (c->x86_num_cores & (c->x86_num_cores - 1))
+ c->x86_num_cores = 1;
+
+#ifdef CONFIG_NUMA
+ /* On a dual core setup the lower bits of apic id
+ distinguish the cores. Fix up the CPU<->node mappings
+ here based on that.
+ Assumes number of cores is a power of two.
+ When using SRAT use mapping from SRAT. */
+ cpu = c->x86_apicid;
+ if (acpi_numa <= 0 && c->x86_num_cores > 1) {
+ cpu_to_node[cpu] = cpu >> hweight32(c->x86_num_cores - 1);
+ if (!node_online(cpu_to_node[cpu]))
+ cpu_to_node[cpu] = first_node(node_online_map);
+ }
+ printk(KERN_INFO "CPU %d(%d) -> Node %d\n",
+ cpu, c->x86_num_cores, cpu_to_node[cpu]);
+#endif
+ }
+
+ return r;
+}
+
+static void __init detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+ int index_lsb, index_msb, tmp;
+ int cpu = smp_processor_id();
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+ if (smp_num_siblings == 1) {
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ } else if (smp_num_siblings > 1) {
+ index_lsb = 0;
+ index_msb = 31;
+ /*
+ * At this point we only support two siblings per
+ * processor package.
+ */
+ if (smp_num_siblings > NR_CPUS) {
+ printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ index_lsb++;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 0x80000000) == 0) {
+ tmp <<= 1;
+ index_msb--;
+ }
+ if (index_lsb != index_msb)
+ index_msb++;
+ phys_proc_id[cpu] = phys_pkg_id(index_msb);
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
+ }
+#endif
+}
+
+static void __init sched_cmp_hack(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ /* AMD dual core looks like HT but isn't really. Hide it from the
+ scheduler. This works around problems with the domain scheduler.
+ Also probably gives slightly better scheduling and disables
+ SMT nice which is harmful on dual core.
+ TBD tune the domain scheduler for dual core. */
+ if (c->x86_vendor == X86_VENDOR_AMD && cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ smp_num_siblings = 1;
+#endif
+}
+
+static void __init init_intel(struct cpuinfo_x86 *c)
+{
+ /* Cache sizes */
+ unsigned n;
+
+ init_intel_cacheinfo(c);
+ n = c->x86_cpuid_level;
+ if (n >= 0x80000008) {
+ unsigned eax = cpuid_eax(0x80000008);
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ }
+
+ if (c->x86 == 15)
+ c->x86_cache_alignment = c->x86_clflush_size * 2;
+}
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c)
+{
+ char *v = c->x86_vendor_id;
+
+ if (!strcmp(v, "AuthenticAMD"))
+ c->x86_vendor = X86_VENDOR_AMD;
+ else if (!strcmp(v, "GenuineIntel"))
+ c->x86_vendor = X86_VENDOR_INTEL;
+ else
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+}
+
+struct cpu_model_info {
+ int vendor;
+ int family;
+ char *model_names[16];
+};
+
+/* Do some early cpuid on the boot CPU to get some parameters that are
+ needed before check_bugs. Everything advanced is in identify_cpu
+ below. */
+void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+ u32 tfms;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_clflush_size = 64;
+ c->x86_cache_alignment = c->x86_clflush_size;
+ c->x86_num_cores = 1;
+ c->x86_apicid = c == &boot_cpu_data ? 0 : c - cpu_data;
+ c->x86_cpuid_level = 0;
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ /* Get vendor name */
+ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+ (unsigned int *)&c->x86_vendor_id[0],
+ (unsigned int *)&c->x86_vendor_id[8],
+ (unsigned int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c);
+
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Intel-defined flags: level 0x00000001 */
+ if (c->cpuid_level >= 0x00000001) {
+ __u32 misc;
+ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
+ &c->x86_capability[0]);
+ c->x86 = (tfms >> 8) & 0xf;
+ c->x86_model = (tfms >> 4) & 0xf;
+ c->x86_mask = tfms & 0xf;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
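+ /*
+ * Worked example: tfms == 0x00020f43 decodes to family
+ * 0xf + 0x2 = 0x11, model 0x4 + (0x2 << 4) = 0x24, stepping 3.
+ */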
+ if (c->x86_capability[0] & (1<<19))
+ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+ c->x86_apicid = misc >> 24;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+}
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+ int i;
+ u32 xlvl;
+
+ early_identify_cpu(c);
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ c->x86_cpuid_level = xlvl;
+ if ((xlvl & 0xffff0000) == 0x80000000) {
+ if (xlvl >= 0x80000001) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[5] = cpuid_ecx(0x80000001);
+ }
+ if (xlvl >= 0x80000004)
+ get_model_name(c); /* Default name */
+ }
+
+ /* Transmeta-defined flags: level 0x80860001 */
+ xlvl = cpuid_eax(0x80860000);
+ if ((xlvl & 0xffff0000) == 0x80860000) {
+ /* Don't set x86_cpuid_level here for now to not confuse. */
+ if (xlvl >= 0x80860001)
+ c->x86_capability[2] = cpuid_edx(0x80860001);
+ }
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ switch (c->x86_vendor) {
+ case X86_VENDOR_AMD:
+ init_amd(c);
+ break;
+
+ case X86_VENDOR_INTEL:
+ init_intel(c);
+ break;
+
+ case X86_VENDOR_UNKNOWN:
+ default:
+ display_cacheinfo(c);
+ break;
+ }
+
+ select_idle_routine(c);
+ detect_ht(c);
+ sched_cmp_hack(c);
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if (c != &boot_cpu_data) {
+ /* AND the already accumulated flags with these */
+ for (i = 0 ; i < NCAPINTS ; i++)
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ }
+
+#ifdef CONFIG_X86_MCE
+ mcheck_init(c);
+#endif
+#ifdef CONFIG_NUMA
+ if (c != &boot_cpu_data)
+ numa_add_cpu(c - cpu_data);
+#endif
+}
+
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+ if (c->x86_model_id[0])
+ printk("%s", c->x86_model_id);
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct cpuinfo_x86 *c = v;
+
+ /*
+ * These flag bits must match the definitions in <asm/cpufeature.h>.
+ * NULL means this bit is undefined or reserved; either way it doesn't
+ * have meaning as far as Linux is concerned. Note that it's important
+ * to realize there is a difference between this table and CPUID -- if
+ * applications want to get the raw CPUID data, they should access
+ * /dev/cpu/<cpu_nr>/cpuid instead.
+ */
+ static char *x86_cap_flags[] = {
+ /* Intel-defined */
+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
+
+ /* AMD-defined */
+ "pni", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
+ NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+
+ /* Transmeta-defined */
+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Other (Linux-defined) */
+ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Intel-defined (#2) */
+ "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
+ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* AMD-defined (#2) */
+ "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+ };
+ static char *x86_power_flags[] = {
+ "ts", /* temperature sensor */
+ "fid", /* frequency id control */
+ "vid", /* voltage id control */
+ "ttp", /* thermal trip */
+ };
+
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(c-cpu_data))
+ return 0;
+#endif
+
+ seq_printf(m,"processor\t: %u\n"
+ "vendor_id\t: %s\n"
+ "cpu family\t: %d\n"
+ "model\t\t: %d\n"
+ "model name\t: %s\n",
+ (unsigned)(c-cpu_data),
+ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
+ c->x86,
+ (int)c->x86_model,
+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+ else
+ seq_printf(m, "stepping\t: unknown\n");
+
+ if (cpu_has(c,X86_FEATURE_TSC)) {
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+ cpu_khz / 1000, (cpu_khz % 1000));
+ }
+
+ /* Cache size */
+ if (c->x86_cache_size >= 0)
+ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+
+#ifdef CONFIG_SMP
+ seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
+ seq_printf(m, "siblings\t: %d\n", c->x86_num_cores * smp_num_siblings);
+#endif
+
+ seq_printf(m,
+ "fpu\t\t: yes\n"
+ "fpu_exception\t: yes\n"
+ "cpuid level\t: %d\n"
+ "wp\t\t: yes\n"
+ "flags\t\t:",
+ c->cpuid_level);
+
+ {
+ int i;
+ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
+ if ( test_bit(i, &c->x86_capability) &&
+ x86_cap_flags[i] != NULL )
+ seq_printf(m, " %s", x86_cap_flags[i]);
+ }
+
+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
+ c->loops_per_jiffy/(500000/HZ),
+ (c->loops_per_jiffy/(5000/HZ)) % 100);
+
+ if (c->x86_tlbsize > 0)
+ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
+ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
+ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
+
+ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
+ c->x86_phys_bits, c->x86_virt_bits);
+
+ seq_printf(m, "power management:");
+ {
+ unsigned i;
+ for (i = 0; i < 32; i++)
+ if (c->x86_power & (1 << i)) {
+ if (i < ARRAY_SIZE(x86_power_flags))
+ seq_printf(m, " %s", x86_power_flags[i]);
+ else
+ seq_printf(m, " [%d]", i);
+ }
+ }
+ seq_printf(m, "\n");
+
+ if (c->x86_num_cores > 1)
+ seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+
+ seq_printf(m, "\n\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
--- /dev/null
+/*
+ * X86-64 specific CPU setup.
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
+ * See setup.c for older changelog.
+ * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ *
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/bitops.h>
+#include <asm/pda.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/i387.h>
+#include <asm/percpu.h>
+#include <asm/mtrr.h>
+#include <asm/proto.h>
+#include <asm/mman.h>
+#include <asm/numa.h>
+
+#include <asm-xen/hypervisor.h>
+
+char x86_boot_params[2048] __initdata = {0,};
+
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
+
+extern struct task_struct init_task;
+
+extern unsigned char __per_cpu_start[], __per_cpu_end[];
+
+extern struct desc_ptr cpu_gdt_descr[];
+struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
+
+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
+
+unsigned long __supported_pte_mask = ~0UL;
+static int do_not_nx __initdata = 0;
+
+/* noexec=on|off
+Control non executable mappings for 64bit processes.
+
+on Enable(default)
+off Disable
+*/
+int __init nonx_setup(char *str)
+{
+ if (!strcmp(str, "on")) {
+ __supported_pte_mask |= _PAGE_NX;
+ do_not_nx = 0;
+ } else if (!strncmp(str, "off", 3)) {
+ do_not_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+ return 0;
+}
+__setup("noexec=", nonx_setup); /* parsed early actually */
+
+int force_personality32 = READ_IMPLIES_EXEC;
+
+/* noexec32=on|off
+Control non executable heap for 32bit processes.
+To control the stack too use noexec=off
+
+on PROT_READ does not imply PROT_EXEC for 32bit processes
+off PROT_READ implies PROT_EXEC (default)
+*/
+static int __init nonx32_setup(char *str)
+{
+ if (!strcmp(str, "on"))
+ force_personality32 &= ~READ_IMPLIES_EXEC;
+ else if (!strcmp(str, "off"))
+ force_personality32 |= READ_IMPLIES_EXEC;
+ return 0;
+}
+__setup("noexec32=", nonx32_setup);
+
+/*
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
+ */
+void __init setup_per_cpu_areas(void)
+{
+ int i;
+ unsigned long size;
+
+ /* Copy section for each CPU (we discard the original) */
+ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+#ifdef CONFIG_MODULES
+ if (size < PERCPU_ENOUGH_ROOM)
+ size = PERCPU_ENOUGH_ROOM;
+#endif
+
+ for (i = 0; i < NR_CPUS; i++) {
+ unsigned char *ptr;
+
+ if (!NODE_DATA(cpu_to_node(i))) {
+ printk("cpu with no node %d, num_online_nodes %d\n",
+ i, num_online_nodes());
+ ptr = alloc_bootmem(size);
+ } else {
+ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
+ }
+ if (!ptr)
+ panic("Cannot allocate cpu data for CPU %d\n", i);
+ cpu_pda[i].data_offset = ptr - __per_cpu_start;
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ }
+}
+
+void pda_init(int cpu)
+{
+ pgd_t *old_level4 = (pgd_t *)xen_start_info.pt_base;
+ struct x8664_pda *pda = &cpu_pda[cpu];
+
+ /* Set up data that may be needed in __get_free_pages early */
+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+ HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
+ (unsigned long)(cpu_pda + cpu));
+
+ pda->me = pda;
+ pda->cpunumber = cpu;
+ pda->irqcount = -1;
+ pda->kernelstack =
+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
+ pda->active_mm = &init_mm;
+ pda->mmu_state = 0;
+ pda->kernel_mode = 1;
+
+ if (cpu == 0) {
+ memcpy((void *)init_level4_pgt,
+ (void *) xen_start_info.pt_base, PAGE_SIZE);
+ /* others are initialized in smpboot.c */
+ pda->pcurrent = &init_task;
+ pda->irqstackptr = boot_cpu_stack;
+ make_page_readonly(init_level4_pgt);
+ make_page_readonly(init_level4_user_pgt);
+ make_page_readonly(level3_user_pgt); /* for vsyscall stuff */
+ xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
+ xen_pud_pin(__pa_symbol(level3_user_pgt));
+ set_pgd((pgd_t *)(init_level4_user_pgt + 511),
+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ } else {
+ pda->irqstackptr = (char *)
+ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+ if (!pda->irqstackptr)
+ panic("cannot allocate irqstack for cpu %d", cpu);
+ }
+
+ xen_pt_switch(__pa(init_level4_pgt));
+ xen_new_user_pt(__pa(init_level4_user_pgt));
+
+ if (cpu == 0) {
+ xen_pgd_unpin(__pa(old_level4));
+#if 0
+ early_printk("__pa: %x, <machine_phys> old_level 4 %x\n",
+ __pa(xen_start_info.pt_base),
+ pfn_to_mfn(__pa(old_level4) >> PAGE_SHIFT));
+#endif
+// make_page_writable(old_level4);
+// free_bootmem(__pa(old_level4), PAGE_SIZE);
+ }
+
+ pda->irqstackptr += IRQSTACKSIZE-64;
+}
+
+char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
+__attribute__((section(".bss.page_aligned")));
+
+/* May not be marked __init: used by software suspend */
+void syscall_init(void)
+{
+#ifdef CONFIG_IA32_EMULATION
+ syscall32_cpu_init ();
+#endif
+}
+
+void __init check_efer(void)
+{
+ unsigned long efer;
+
+ /* rdmsrl(MSR_EFER, efer); */
+
+ /*
+ * At this point, Xen does not like the bit 63.
+ * So NX is not supported. Come back later.
+ */
+ efer = 0;
+
+ if (!(efer & EFER_NX) || do_not_nx) {
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+}
+
+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
+{
+ unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+ unsigned long va;
+ int f;
+
+ for (va = gdt_descr->address, f = 0;
+ va < gdt_descr->address + gdt_descr->size;
+ va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ make_page_readonly((void *)va);
+ }
+ flush_page_update_queue();
+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
+ sizeof (struct desc_struct)))
+ BUG();
+}
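+/*
+ * N.B. Xen installs the guest GDT itself, so the backing pages are made
+ * read-only first and handed over as machine frame numbers; a writable
+ * GDT page would let the guest forge descriptors behind Xen's back.
+ */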
+
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ * A lot of state is already set up in PDA init.
+ */
+void __init cpu_init (void)
+{
+#ifdef CONFIG_SMP
+ int cpu = stack_smp_processor_id();
+#else
+ int cpu = smp_processor_id();
+#endif
+ struct tss_struct *t = &per_cpu(init_tss, cpu);
+ unsigned long v;
+ char *estacks = NULL;
+ struct task_struct *me;
+ int i;
+
+ /* CPU 0 is initialised in head64.c */
+ if (cpu != 0) {
+ pda_init(cpu);
+ } else
+ estacks = boot_exception_stacks;
+
+ me = current;
+
+ if (test_and_set_bit(cpu, &cpu_initialized))
+ panic("CPU#%d already initialized!\n", cpu);
+
+ printk("Initializing CPU#%d\n", cpu);
+
+#if 0
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+#endif
+ /*
+ * Initialize the per-CPU GDT with the boot GDT,
+ * and set up the GDT descriptor:
+ */
+ if (cpu) {
+ memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
+ }
+
+ cpu_gdt_descr[cpu].size = GDT_SIZE;
+ cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
+#if 0
+ asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
+ asm volatile("lidt %0" :: "m" (idt_descr));
+#endif
+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+#if 0
+ memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
+
+#endif
+ memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+ GDT_ENTRY_TLS_ENTRIES * 8);
+
+ /*
+ * Delete NT
+ */
+
+ asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
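+ /* RFLAGS bit 14 is NT (Nested Task); clearing it keeps a later
+ iret from attempting a hardware task switch. */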
+
+ if (cpu == 0)
+ early_identify_cpu(&boot_cpu_data);
+
+ syscall_init();
+
+ barrier();
+ check_efer();
+
+ /*
+ * set up and load the per-CPU TSS
+ */
+ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+ if (cpu) {
+ estacks = (char *)__get_free_pages(GFP_ATOMIC,
+ EXCEPTION_STACK_ORDER);
+ if (!estacks)
+ panic("Cannot allocate exception stack %ld %d\n",
+ v, cpu);
+ }
+ estacks += EXCEPTION_STKSZ;
+ t->ist[v] = (unsigned long)estacks;
+ }
+
+ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ /*
+ * <= is required because the CPU will access up to
+ * 8 bits beyond the end of the IO permission bitmap.
+ */
+ for (i = 0; i <= IO_BITMAP_LONGS; i++)
+ t->io_bitmap[i] = ~0UL;
+
+ atomic_inc(&init_mm.mm_count);
+ me->active_mm = &init_mm;
+ if (me->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, me);
+
+ load_LDT(&init_mm.context);
+ flush_page_update_queue();
+
+#if 0
+ /*
+ * Clear all 6 debug registers:
+ */
+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+ CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+#undef CD
+#endif
+#if 0
+ fpu_init();
+#endif
+
+#ifdef CONFIG_NUMA
+ numa_add_cpu(cpu);
+#endif
+}
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
+ * 2000-2002 x86-64 support by Andi Kleen
+ *
+ * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/compiler.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+#include <asm/proto.h>
+
+/* #define DEBUG_SIG 1 */
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs * regs);
+void ia32_setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs * regs);
+
+asmlinkage long
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+#ifdef DEBUG_SIG
+ printk("rt_sigsuspend saveset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
+ saveset, newset, regs, regs->rip);
+#endif
+ regs->rax = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage long
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ struct pt_regs *regs)
+{
+ return do_sigaltstack(uss, uoss, regs->rsp);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct rt_sigframe
+{
+ char *pretcode;
+ struct ucontext uc;
+ struct siginfo info;
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax)
+{
+ unsigned int err = 0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->x)
+
+ COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
+ COPY(rdx); COPY(rcx); COPY(rip);
+ COPY(r8);
+ COPY(r9);
+ COPY(r10);
+ COPY(r11);
+ COPY(r12);
+ COPY(r13);
+ COPY(r14);
+ COPY(r15);
+
+ {
+ unsigned int tmpflags;
+ err |= __get_user(tmpflags, &sc->eflags);
+ regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
+ regs->orig_rax = -1; /* disable syscall checks */
+ }
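+ /*
+ * N.B. the 0x40DD5 mask admits only CF, PF, AF, ZF, SF, TF, DF, OF
+ * and AC from the user frame; privileged bits such as IF and IOPL
+ * stay as the kernel left them.
+ */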
+
+ {
+ struct _fpstate __user * buf;
+ err |= __get_user(buf, &sc->fpstate);
+
+ if (buf) {
+ if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ goto badframe;
+ err |= restore_i387(buf);
+ } else {
+ struct task_struct *me = current;
+ if (used_math()) {
+ clear_fpu(me);
+ clear_used_math();
+ }
+ }
+ }
+
+ err |= __get_user(*prax, &sc->rax);
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+ unsigned long eax;
+
+ frame = (struct rt_sigframe __user *)(regs->rsp - 8);
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame))) {
+ goto badframe;
+ }
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) {
+ goto badframe;
+ }
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax)) {
+ goto badframe;
+ }
+
+#ifdef DEBUG_SIG
+ printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs->rip,regs->rsp,frame,eax);
+#endif
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
+ goto badframe;
+
+ return eax;
+
+badframe:
+ signal_fault(regs,frame,"sigreturn");
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
+{
+ int err = 0;
+ unsigned long eflags;
+
+ err |= __put_user(0, &sc->gs);
+ err |= __put_user(0, &sc->fs);
+
+ err |= __put_user(regs->rdi, &sc->rdi);
+ err |= __put_user(regs->rsi, &sc->rsi);
+ err |= __put_user(regs->rbp, &sc->rbp);
+ err |= __put_user(regs->rsp, &sc->rsp);
+ err |= __put_user(regs->rbx, &sc->rbx);
+ err |= __put_user(regs->rdx, &sc->rdx);
+ err |= __put_user(regs->rcx, &sc->rcx);
+ err |= __put_user(regs->rax, &sc->rax);
+ err |= __put_user(regs->r8, &sc->r8);
+ err |= __put_user(regs->r9, &sc->r9);
+ err |= __put_user(regs->r10, &sc->r10);
+ err |= __put_user(regs->r11, &sc->r11);
+ err |= __put_user(regs->r12, &sc->r12);
+ err |= __put_user(regs->r13, &sc->r13);
+ err |= __put_user(regs->r14, &sc->r14);
+ err |= __put_user(regs->r15, &sc->r15);
+ err |= __put_user(me->thread.trap_no, &sc->trapno);
+ err |= __put_user(me->thread.error_code, &sc->err);
+ err |= __put_user(regs->rip, &sc->rip);
+ eflags = regs->eflags;
+ if (current->ptrace & PT_PTRACED) {
+ eflags &= ~TF_MASK;
+ }
+ err |= __put_user(eflags, &sc->eflags);
+ err |= __put_user(mask, &sc->oldmask);
+ err |= __put_user(me->thread.cr2, &sc->cr2);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+
+static void __user *
+get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
+{
+ unsigned long rsp;
+
+ /* Default to using normal stack - redzone */
+ rsp = regs->rsp - 128;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ /* RED-PEN: redzone on that stack? */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (sas_ss_flags(rsp) == 0)
+ rsp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ return (void __user *)round_down(rsp - size, 16);
+}
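+
+/*
+ * Worked example (illustrative, not from the original source): with
+ * regs->rsp == 0x7fff0 and size == 0x100, the redzone-adjusted base is
+ * 0x7fff0 - 128 == 0x7ff70, and round_down(0x7ff70 - 0x100, 16) places
+ * the 16-byte-aligned frame at 0x7fe70.
+ */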
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs * regs)
+{
+ struct rt_sigframe __user *frame;
+ struct _fpstate __user *fp = NULL;
+ int err = 0;
+ struct task_struct *me = current;
+
+ if (used_math()) {
+ fp = get_stack(ka, regs, sizeof(struct _fpstate));
+ frame = (void __user *)round_down((unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
+
+ if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) {
+ goto give_sigsegv;
+ }
+
+ if (save_i387(fp) < 0)
+ err |= -1;
+ } else {
+ frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
+ }
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
+ goto give_sigsegv;
+ }
+
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err) {
+ goto give_sigsegv;
+ }
+ }
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->rsp),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
+ err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
+ if (sizeof(*set) == 16) {
+ err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
+ err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
+ } else {
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ /* x86-64 should always use SA_RESTORER. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
+ } else {
+ /* could use a vstub here */
+ goto give_sigsegv;
+ }
+
+ if (err) {
+ goto give_sigsegv;
+ }
+
+#ifdef DEBUG_SIG
+ printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
+#endif
+
+ /* Set up registers for signal handler */
+ {
+ struct exec_domain *ed = current_thread_info()->exec_domain;
+ if (unlikely(ed && ed->signal_invmap && sig < 32))
+ sig = ed->signal_invmap[sig];
+ }
+ regs->rdi = sig;
+ /* In case the signal handler was declared without prototypes */
+ regs->rax = 0;
+
+ /* This also works for non SA_SIGINFO handlers because they expect the
+ next argument after the signal number on the stack. */
+ regs->rsi = (unsigned long)&frame->info;
+ regs->rdx = (unsigned long)&frame->uc;
+ regs->rip = (unsigned long) ka->sa.sa_handler;
+
+ regs->rsp = (unsigned long)frame;
+
+ set_fs(USER_DS);
+ if (regs->eflags & TF_MASK) {
+ if ((current->ptrace & (PT_PTRACED | PT_DTRACE)) == (PT_PTRACED | PT_DTRACE)) {
+ ptrace_notify(SIGTRAP);
+ } else {
+ regs->eflags &= ~TF_MASK;
+ }
+ }
+
+#ifdef DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->rip, frame->pretcode);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+#ifdef DEBUG_SIG
+ printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n", current->pid, sig,
+ regs->rip, regs->rsp, regs);
+#endif
+
+ /* Are we from a system call? */
+ if ((long)regs->orig_rax >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->rax) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->rax = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->rax = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->rax = regs->orig_rax;
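+ /* back up over the 2-byte syscall instruction so it is re-executed */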
+ regs->rip -= 2;
+ }
+ }
+
+#ifdef CONFIG_IA32_EMULATION
+ if (test_thread_flag(TIF_IA32)) {
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ia32_setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ ia32_setup_frame(sig, ka, oldset, regs);
+ } else
+#endif
+ setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if ((regs->cs & 2) != 2) {
+ return 1;
+ }
+
+ if (try_to_freeze(0))
+ goto no_signal;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Reenable any watchpoints before delivering the
+ * signal to user space. The processor register will
+ * have been cleared if the watchpoint triggered
+ * inside the kernel.
+ */
+ if (current->thread.debugreg7) {
+ HYPERVISOR_set_debugreg(7,
+ current->thread.debugreg7);
+ }
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+ return 1;
+ }
+
+ no_signal:
+ /* Did we come from a system call? */
+ if ((long)regs->orig_rax >= 0) {
+ /* Restart the system call - no handlers present */
+ long res = regs->rax;
+ if (res == -ERESTARTNOHAND ||
+ res == -ERESTARTSYS ||
+ res == -ERESTARTNOINTR) {
+ regs->rax = regs->orig_rax;
+ regs->rip -= 2;
+ }
+ if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
+ regs->rax = __NR_restart_syscall;
+ regs->rip -= 2;
+ }
+ }
+ return 0;
+}
+
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_flags)
+{
+#ifdef DEBUG_SIG
+ printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
+ thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current));
+#endif
+
+ /* Pending single-step? */
+ if (thread_info_flags & _TIF_SINGLESTEP) {
+ regs->eflags |= TF_MASK;
+ clear_thread_flag(TIF_SINGLESTEP);
+ }
+
+ /* deal with pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs,oldset);
+}
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
+{
+ struct task_struct *me = current;
+ if (exception_trace)
+ printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
+ me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax);
+
+ force_sig(SIGSEGV, me);
+}
--- /dev/null
+/*
+ * Intel SMP support routines.
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 2002,2003 Andi Kleen, SuSE Labs.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/interrupt.h>
+
+#include <asm/mtrr.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/mach_apic.h>
+#include <asm/proto.h>
+
+/*
+ * Smarter SMP flushing macros.
+ * c/o Linus Torvalds.
+ *
+ * These mean you can really definitely utterly forget about
+ * writing to user space from interrupts. (It's not allowed anyway.)
+ *
+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
+ */
+
+static cpumask_t flush_cpumask;
+static struct mm_struct * flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+#define FLUSH_ALL 0xffffffff
+
+/*
+ * We cannot call mmdrop() because we are in interrupt context;
+ * instead we update mm->cpu_vm_mask.
+ */
+static inline void leave_mm (unsigned long cpu)
+{
+ if (read_pda(mmu_state) == TLBSTATE_OK)
+ BUG();
+ clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
+ __flush_tlb();
+}
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
+ * Stop ipi delivery for the old mm. This is not synchronized with
+ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
+ * for the wrong mm, and in the worst case we perform a superfluous
+ * tlb flush.
+ * 1a2) set cpu mmu_state to TLBSTATE_OK
+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ * was in lazy tlb mode.
+ * 1a3) update cpu active_mm
+ * Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
+ * Now the other cpus will send tlb flush ipis.
+ * 1a5) change cr3.
+ * 1b) thread switch without mm change
+ * cpu active_mm is correct, cpu0 already handles
+ * flush ipis.
+ * 1b1) set cpu mmu_state to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ * Atomically set the bit [other cpus will start sending flush ipis],
+ * and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ * runs in kernel space, the cpu could load tlb entries for user space
+ * pages.
+ *
+ * The good news is that cpu mmu_state is local to each cpu, no
+ * write/read ordering problems.
+ */
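+
+/*
+ * Illustrative sketch (not the actual switch_mm() in this patch) of the
+ * 1a) ordering above; the pda helpers named here are assumptions:
+ *
+ * clear_bit(cpu, &prev->cpu_vm_mask); (1a1)
+ * write_pda(mmu_state, TLBSTATE_OK); (1a2)
+ * write_pda(active_mm, next); (1a3)
+ * set_bit(cpu, &next->cpu_vm_mask); (1a4)
+ * load_cr3(next->pgd); (1a5: change cr3)
+ */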
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ */
+
+asmlinkage void smp_invalidate_interrupt (void)
+{
+ unsigned long cpu;
+
+ cpu = get_cpu();
+
+ if (!cpu_isset(cpu, flush_cpumask))
+ goto out;
+ /*
+ * This was a BUG() but until someone can quote me the
+ * line from the intel manual that guarantees an IPI to
+ * multiple CPUs is retried _only_ on the erroring CPUs,
+ * it's staying as a return
+ *
+ * BUG();
+ */
+
+ if (flush_mm == read_pda(active_mm)) {
+ if (read_pda(mmu_state) == TLBSTATE_OK) {
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ __flush_tlb_one(flush_va);
+ } else
+ leave_mm(cpu);
+ }
+ ack_APIC_irq();
+ cpu_clear(cpu, flush_cpumask);
+
+out:
+ put_cpu_no_resched();
+}
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ cpumask_t tmp;
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+ * - we do not send IPIs to not-yet booted CPUs.
+ * - current CPU must not be in mask
+ * - mask must exist :)
+ */
+ BUG_ON(cpus_empty(cpumask));
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(tmp, cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(!mm);
+
+ /*
+ * I'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+ * Temporarily this turns IRQs off, so that lockups are
+ * detected by the NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+ cpus_or(flush_cpumask, cpumask, flush_cpumask);
+
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */
+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+
+ while (!cpus_empty(flush_cpumask))
+ mb(); /* nothing. lockup detection does not belong here */
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ preempt_enable();
+}
+
+void flush_tlb_mm (struct mm_struct * mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ if (current->active_mm == mm) {
+ if (current->mm)
+ local_flush_tlb();
+ else
+ leave_mm(smp_processor_id());
+ }
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ if (current->active_mm == mm) {
+ if(current->mm)
+ __flush_tlb_one(va);
+ else
+ leave_mm(smp_processor_id());
+ }
+
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+static void do_flush_tlb_all(void* info)
+{
+ unsigned long cpu = smp_processor_id();
+
+ __flush_tlb_all();
+ if (read_pda(mmu_state) == TLBSTATE_LAZY)
+ leave_mm(cpu);
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
+
+void smp_kdb_stop(void)
+{
+ send_IPI_allbutself(KDB_VECTOR);
+}
+
+/*
+ * this function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+
+void smp_send_reschedule(int cpu)
+{
+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+}
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+};
+
+static struct call_data_struct * call_data;
+
+/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+static void __smp_call_function (void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ struct call_data_struct data;
+ int cpus = num_online_cpus()-1;
+
+ if (!cpus)
+ return;
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
+ wmb();
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+ cpu_relax();
+
+ if (!wait)
+ return;
+
+ while (atomic_read(&data.finished) != cpus)
+ cpu_relax();
+}
+
+/*
+ * smp_call_function - run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other
+ * CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute func or have executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ * Actually there are a few legal cases, like panic.
+ */
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ spin_lock(&call_lock);
+ __smp_call_function(func,info,nonatomic,wait);
+ spin_unlock(&call_lock);
+ return 0;
+}
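+
+/*
+ * Illustrative usage sketch (not part of this patch; note_cpu() is a
+ * hypothetical helper):
+ *
+ * static void note_cpu(void *unused)
+ * {
+ * printk(KERN_DEBUG "cpu %d alive\n", smp_processor_id());
+ * }
+ *
+ * smp_call_function(note_cpu, NULL, 0, 1); waits until all other
+ * CPUs have run note_cpu() before returning.
+ */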
+
+void smp_stop_cpu(void)
+{
+ /*
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+ local_irq_disable();
+ disable_local_APIC();
+ local_irq_enable();
+}
+
+static void smp_really_stop_cpu(void *dummy)
+{
+ smp_stop_cpu();
+ for (;;)
+ asm("hlt");
+}
+
+void smp_send_stop(void)
+{
+ int nolock = 0;
+ if (reboot_force)
+ return;
+ /* Don't deadlock on the call lock in panic */
+ if (!spin_trylock(&call_lock)) {
+ /* ignore locking because we have panicked anyway */
+ nolock = 1;
+ }
+ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
+ if (!nolock)
+ spin_unlock(&call_lock);
+ smp_stop_cpu();
+}
+
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+asmlinkage void smp_reschedule_interrupt(void)
+{
+ ack_APIC_irq();
+}
+
+asmlinkage void smp_call_function_interrupt(void)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ ack_APIC_irq();
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ mb();
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ irq_enter();
+ (*func)(info);
+ irq_exit();
+ if (wait) {
+ mb();
+ atomic_inc(&call_data->finished);
+ }
+}
--- /dev/null
+/*
+ * x86 SMP booting functions
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * Copyright 2001 Andi Kleen, SuSE Labs.
+ *
+ * Much of the core SMP work is based on previous work by Thomas Radke, to
+ * whom a great many thanks are extended.
+ *
+ * Thanks to Intel for making available several different Pentium,
+ * Pentium Pro and Pentium-II/Xeon MP machines.
+ * Original development of Linux SMP code supported by Caldera.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ *
+ * Fixes
+ * Felix Koop : NR_CPUS used properly
+ * Jose Renau : Handle single CPU case.
+ * Alan Cox : By repeated request 8) - Total BogoMIP report.
+ * Greg Wright : Fix for kernel stacks panic.
+ * Erich Boleyn : MP v1.4 and additional changes.
+ * Matthias Sattler : Changes for 2.1 kernel map.
+ * Michel Lespinasse : Changes for 2.1 kernel map.
+ * Michael Chastain : Change trampoline.S to gnu as.
+ * Alan Cox : Dumb bug: 'B' step PPro's are fine
+ * Ingo Molnar : Added APIC timers, based on code
+ * from Jose Renau
+ * Ingo Molnar : various cleanups and rewrites
+ * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs
+ * Andi Kleen : Changed for SMP boot into long mode.
+ * Rusty Russell : Hacked into shape for new "hotplug" boot process.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/smp_lock.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h>
+#include <asm/mtrr.h>
+#include <asm/pgalloc.h>
+#include <asm/desc.h>
+#include <asm/kdebug.h>
+#include <asm/tlbflush.h>
+#include <asm/proto.h>
+
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+/* Package ID of each logical CPU */
+u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+EXPORT_SYMBOL(phys_proc_id);
+
+/* Bitmask of currently online CPUs */
+cpumask_t cpu_online_map;
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+static cpumask_t smp_commenced_mask;
+
+/* Per CPU bogomips and other parameters */
+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+
+/* Set when the idlers are all forked */
+int smp_threads_ready;
+
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+
+/*
+ * Trampoline 80x86 program as an array.
+ */
+
+extern unsigned char trampoline_data [];
+extern unsigned char trampoline_end [];
+
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+
+static unsigned long __init setup_trampoline(void)
+{
+ void *tramp = __va(SMP_TRAMPOLINE_BASE);
+ extern volatile __u32 tramp_gdt_ptr;
+ tramp_gdt_ptr = __pa_symbol(&cpu_gdt_table);
+ memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
+ return virt_to_phys(tramp);
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+
+static void __init smp_store_cpu_info(int id)
+{
+ struct cpuinfo_x86 *c = cpu_data + id;
+
+ *c = boot_cpu_data;
+ identify_cpu(c);
+}
+
+/*
+ * TSC synchronization.
+ *
+ * We first check whether all CPUs have their TSC's synchronized,
+ * then we print a warning if not, and always resync.
+ */
+
+static atomic_t tsc_start_flag = ATOMIC_INIT(0);
+static atomic_t tsc_count_start = ATOMIC_INIT(0);
+static atomic_t tsc_count_stop = ATOMIC_INIT(0);
+static unsigned long long tsc_values[NR_CPUS];
+
+#define NR_LOOPS 5
+
+extern unsigned int fast_gettimeoffset_quotient;
+
+static void __init synchronize_tsc_bp (void)
+{
+ int i;
+ unsigned long long t0;
+ unsigned long long sum, avg;
+ long long delta;
+ long one_usec;
+ int buggy = 0;
+
+ printk(KERN_INFO "checking TSC synchronization across %u CPUs: ",num_booting_cpus());
+
+ one_usec = cpu_khz;
+
+ atomic_set(&tsc_start_flag, 1);
+ wmb();
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronized and
+ * the BP and APs set their cycle counters to zero all at
+ * once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+ for (i = 0; i < NR_LOOPS; i++) {
+ /*
+ * all APs synchronize but they loop on '== num_cpus'
+ */
+ while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) mb();
+ atomic_set(&tsc_count_stop, 0);
+ wmb();
+ /*
+ * this lets the APs save their current TSC:
+ */
+ atomic_inc(&tsc_count_start);
+
+ sync_core();
+ rdtscll(tsc_values[smp_processor_id()]);
+ /*
+ * We clear the TSC in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_tsc(0, 0);
+
+ /*
+ * Wait for all APs to leave the synchronization point:
+ */
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) mb();
+ atomic_set(&tsc_count_start, 0);
+ wmb();
+ atomic_inc(&tsc_count_stop);
+ }
+
+ sum = 0;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_isset(i, cpu_callout_map)) {
+ t0 = tsc_values[i];
+ sum += t0;
+ }
+ }
+ avg = sum / num_booting_cpus();
+
+ sum = 0;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+
+ delta = tsc_values[i] - avg;
+ if (delta < 0)
+ delta = -delta;
+ /*
+ * We report bigger than 2 microseconds clock differences.
+ */
+ if (delta > 2*one_usec) {
+ long realdelta;
+ if (!buggy) {
+ buggy = 1;
+ printk("\n");
+ }
+ realdelta = delta / one_usec;
+ if (tsc_values[i] < avg)
+ realdelta = -realdelta;
+
+ printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
+ i, realdelta);
+ }
+
+ sum += delta;
+ }
+ if (!buggy)
+ printk("passed.\n");
+}
+
+static void __init synchronize_tsc_ap (void)
+{
+ int i;
+
+ /*
+ * Not every cpu is online at the time
+ * this gets called, so we first wait for the BP to
+ * finish SMP initialization:
+ */
+ while (!atomic_read(&tsc_start_flag)) mb();
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&tsc_count_start);
+ while (atomic_read(&tsc_count_start) != num_booting_cpus()) mb();
+
+ sync_core();
+ rdtscll(tsc_values[smp_processor_id()]);
+ if (i == NR_LOOPS-1)
+ write_tsc(0, 0);
+
+ atomic_inc(&tsc_count_stop);
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
+ }
+}
+#undef NR_LOOPS
+
+static atomic_t init_deasserted;
+
+void __init smp_callin(void)
+{
+ int cpuid, phys_id;
+ unsigned long timeout;
+
+ /*
+ * If woken up by an INIT in an 82489DX configuration
+ * we may get here before an INIT-deassert IPI reaches
+ * our local APIC. We have to wait for the IPI or we'll
+ * lock up on an APIC access.
+ */
+ while (!atomic_read(&init_deasserted));
+
+ /*
+ * (This works even if the APIC is not enabled.)
+ */
+ phys_id = GET_APIC_ID(apic_read(APIC_ID));
+ cpuid = smp_processor_id();
+ if (cpu_isset(cpuid, cpu_callin_map)) {
+ panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
+ phys_id, cpuid);
+ }
+ Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
+
+ /*
+ * STARTUP IPIs are fragile beasts as they might sometimes
+ * trigger some glue motherboard logic. Complete APIC bus
+ * silence for 1 second; this overestimates the time the
+ * boot CPU spends sending the up to 2 STARTUP IPIs
+ * by a factor of two. This should be enough.
+ */
+
+ /*
+ * Waiting 2s total for startup (udelay is not yet working)
+ */
+ timeout = jiffies + 2*HZ;
+ while (time_before(jiffies, timeout)) {
+ /*
+ * Has the boot CPU finished its STARTUP sequence?
+ */
+ if (cpu_isset(cpuid, cpu_callout_map))
+ break;
+ rep_nop();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ panic("smp_callin: CPU%d started up but did not get a callout!\n",
+ cpuid);
+ }
+
+ /*
+ * the boot CPU has finished the init stage and is spinning
+ * on callin_map until we finish. We are free to set up this
+ * CPU, first the APIC. (this is probably redundant on most
+ * boards)
+ */
+
+ Dprintk("CALLIN, before setup_local_APIC().\n");
+ setup_local_APIC();
+
+ local_irq_enable();
+
+ /*
+ * Get our bogomips.
+ */
+ calibrate_delay();
+ Dprintk("Stack at about %p\n",&cpuid);
+
+ disable_APIC_timer();
+
+ /*
+ * Save our processor parameters
+ */
+ smp_store_cpu_info(cpuid);
+
+ local_irq_disable();
+
+ /*
+ * Allow the master to continue.
+ */
+ cpu_set(cpuid, cpu_callin_map);
+
+ /*
+ * Synchronize the TSC with the BP
+ */
+ if (cpu_has_tsc)
+ synchronize_tsc_ap();
+}
+
+int cpucount;
+
+/*
+ * Activate a secondary processor.
+ */
+void __init start_secondary(void)
+{
+ /*
+ * Don't put anything before smp_callin(); SMP booting
+ * is so fragile that we want to limit the things done
+ * here to the bare minimum.
+ */
+ cpu_init();
+ smp_callin();
+
+ /* otherwise gcc will move up the smp_processor_id before the cpu_init */
+ barrier();
+
+ Dprintk("cpu %d: waiting for commence\n", smp_processor_id());
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ rep_nop();
+
+ Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
+ setup_secondary_APIC_clock();
+
+ Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());
+
+ if (nmi_watchdog == NMI_IO_APIC) {
+ disable_8259A_irq(0);
+ enable_NMI_through_LVT0(NULL);
+ enable_8259A_irq(0);
+ }
+
+
+ enable_APIC_timer();
+
+ /*
+ * low-memory mappings have been cleared, flush them from
+ * the local TLBs too.
+ */
+ local_flush_tlb();
+
+ Dprintk("cpu %d eSetting cpu_online_map\n", smp_processor_id());
+ cpu_set(smp_processor_id(), cpu_online_map);
+ wmb();
+
+ cpu_idle();
+}
+
+extern volatile unsigned long init_rsp;
+extern void (*initial_code)(void);
+
+#if APIC_DEBUG
+static inline void inquire_remote_apic(int apicid)
+{
+ unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
+ char *names[] = { "ID", "VERSION", "SPIV" };
+ int timeout, status;
+
+ printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
+
+ for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
+ printk("... APIC #%d %s: ", apicid, names[i]);
+
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
+
+ timeout = 0;
+ do {
+ udelay(100);
+ status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
+ } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
+
+ switch (status) {
+ case APIC_ICR_RR_VALID:
+ status = apic_read(APIC_RRR);
+ printk("%08x\n", status);
+ break;
+ default:
+ printk("failed\n");
+ }
+ }
+}
+#endif
+
+static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
+{
+ unsigned long send_status = 0, accept_status = 0;
+ int maxlvt, timeout, num_starts, j;
+
+ Dprintk("Asserting INIT.\n");
+
+ /*
+ * Turn INIT on target chip
+ */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /*
+ * Send IPI
+ */
+ apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
+ | APIC_DM_INIT);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ mdelay(10);
+
+ Dprintk("Deasserting INIT.\n");
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /* Send IPI */
+ apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ atomic_set(&init_deasserted, 1);
+
+ /*
+ * Should we send STARTUP IPIs ?
+ *
+ * Determine this based on the APIC version.
+ * If we don't have an integrated APIC, don't send the STARTUP IPIs.
+ */
+ if (APIC_INTEGRATED(apic_version[phys_apicid]))
+ num_starts = 2;
+ else
+ num_starts = 0;
+
+ /*
+ * Run STARTUP IPI loop.
+ */
+ Dprintk("#startup loops: %d.\n", num_starts);
+
+ maxlvt = get_maxlvt();
+
+ for (j = 1; j <= num_starts; j++) {
+ Dprintk("Sending STARTUP #%d.\n",j);
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ Dprintk("After apic_write.\n");
+
+ /*
+ * STARTUP IPI
+ */
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /* Boot on the stack */
+ /* Kick the second */
+ apic_write_around(APIC_ICR, APIC_DM_STARTUP
+ | (start_rip >> 12));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(300);
+
+ Dprintk("Startup point 1.\n");
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(200);
+ /*
+ * Due to the Pentium erratum 3AP.
+ */
+ if (maxlvt > 3) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ }
+ accept_status = (apic_read(APIC_ESR) & 0xEF);
+ if (send_status || accept_status)
+ break;
+ }
+ Dprintk("After Startup.\n");
+
+ if (send_status)
+ printk(KERN_ERR "APIC never delivered???\n");
+ if (accept_status)
+ printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
+
+ return (send_status | accept_status);
+}
+
+static void __init do_boot_cpu (int apicid)
+{
+ struct task_struct *idle;
+ unsigned long boot_error;
+ int timeout, cpu;
+ unsigned long start_rip;
+
+ cpu = ++cpucount;
+ /*
+ * We can't use kernel_thread since we must avoid to
+ * reschedule the child.
+ */
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle))
+ panic("failed fork for CPU %d", cpu);
+ x86_cpu_to_apicid[cpu] = apicid;
+
+ cpu_pda[cpu].pcurrent = idle;
+
+ start_rip = setup_trampoline();
+
+ init_rsp = idle->thread.rsp;
+ per_cpu(init_tss,cpu).rsp0 = init_rsp;
+ initial_code = start_secondary;
+ clear_ti_thread_flag(idle->thread_info, TIF_FORK);
+
+ printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
+ start_rip, init_rsp);
+
+ /*
+ * This grunge runs the startup process for
+ * the targeted processor.
+ */
+
+ atomic_set(&init_deasserted, 0);
+
+ Dprintk("Setting warm reset code and vector.\n");
+
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ Dprintk("1.\n");
+ *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
+ Dprintk("2.\n");
+ *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
+ Dprintk("3.\n");
+
+ /*
+ * Be paranoid about clearing APIC errors.
+ */
+ if (APIC_INTEGRATED(apic_version[apicid])) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ }
+
+ /*
+ * Status is now clean
+ */
+ boot_error = 0;
+
+ /*
+ * Starting actual IPI sequence...
+ */
+ boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
+
+ if (!boot_error) {
+ /*
+ * allow APs to start initializing.
+ */
+ Dprintk("Before Callout %d.\n", cpu);
+ cpu_set(cpu, cpu_callout_map);
+ Dprintk("After Callout %d.\n", cpu);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 50000; timeout++) {
+ if (cpu_isset(cpu, cpu_callin_map))
+ break; /* It has booted */
+ udelay(100);
+ }
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ /* number CPUs logically, starting from 1 (BSP is 0) */
+ Dprintk("OK.\n");
+ print_cpu_info(&cpu_data[cpu]);
+ Dprintk("CPU has booted.\n");
+ } else {
+ boot_error = 1;
+ if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
+ == 0xA5)
+ /* trampoline started but...? */
+ printk("Stuck ??\n");
+ else
+ /* trampoline code not run */
+ printk("Not responding.\n");
+#if APIC_DEBUG
+ inquire_remote_apic(apicid);
+#endif
+ }
+ }
+ if (boot_error) {
+ cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+ clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ cpucount--;
+ x86_cpu_to_apicid[cpu] = BAD_APICID;
+ x86_cpu_to_log_apicid[cpu] = BAD_APICID;
+ }
+}
+
+cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
+
+static void smp_tune_scheduling (void)
+{
+ int cachesize; /* kB */
+ unsigned long bandwidth = 1000; /* MB/s */
+ /*
+ * Rough estimation for SMP scheduling, this is the number of
+ * cycles it takes for a fully memory-limited process to flush
+ * the SMP-local cache.
+ *
+ * (For a P5 this pretty much means we will choose another idle
+ * CPU almost always at wakeup time (this is due to the small
+ * L1 cache), on PIIs it's around 50-100 usecs, depending on
+ * the cache size)
+ */
+
+ if (!cpu_khz) {
+ /*
+ * this basically disables processor-affinity
+ * scheduling on SMP without a TSC.
+ */
+ cacheflush_time = 0;
+ return;
+ } else {
+ cachesize = boot_cpu_data.x86_cache_size;
+ if (cachesize == -1) {
+ cachesize = 16; /* Pentiums, 2x8kB cache */
+ bandwidth = 100;
+ }
+
+ cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
+ }
+
+ cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
+
+ printk(KERN_INFO "per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
+ (long)cacheflush_time/(cpu_khz/1000),
+ ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
+ printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
+ (cache_decay_ticks + 1) * 1000 / HZ);
+}
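+
+/*
+ * Worked example (illustrative): with cpu_khz == 2000000 (2 GHz), a
+ * 512 kB cache and the default 1000 MB/s bandwidth, cacheflush_time is
+ * (2000000>>10) * (512<<10) / 1000 ~= 1.02M cycles, i.e. about 512
+ * usecs, which matches streaming 512 kB at 1000 MB/s.
+ */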
+
+/*
+ * Cycle through the processors sending APIC IPIs to boot each.
+ */
+
+static void __init smp_boot_cpus(unsigned int max_cpus)
+{
+ unsigned apicid, cpu, bit, kicked;
+
+ nmi_watchdog_default();
+
+ /*
+ * Setup boot CPU information
+ */
+ smp_store_cpu_info(0); /* Final full version of the data */
+ printk(KERN_INFO "CPU%d: ", 0);
+ print_cpu_info(&cpu_data[0]);
+
+ current_thread_info()->cpu = 0;
+ smp_tune_scheduling();
+
+ if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
+ printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
+ hard_smp_processor_id());
+ physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+ }
+
+ /*
+ * If we couldn't find an SMP configuration at boot time,
+ * get out of here now!
+ */
+ if (!smp_found_config) {
+ printk(KERN_NOTICE "SMP motherboard not detected.\n");
+ io_apic_irqs = 0;
+ cpu_online_map = cpumask_of_cpu(0);
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ if (APIC_init_uniprocessor())
+ printk(KERN_NOTICE "Local APIC not detected."
+ " Using dummy APIC emulation.\n");
+ goto smp_done;
+ }
+
+ /*
+ * Should not be necessary because the MP table should list the boot
+ * CPU too, but we do it for the sake of robustness anyway.
+ */
+ if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
+ printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
+ boot_cpu_id);
+ physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+ }
+
+ /*
+ * If we couldn't find a local APIC, then get out of here now!
+ */
+ if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
+ printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+ boot_cpu_id);
+ printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
+ io_apic_irqs = 0;
+ cpu_online_map = cpumask_of_cpu(0);
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ disable_apic = 1;
+ goto smp_done;
+ }
+
+ verify_local_APIC();
+
+ /*
+ * If SMP should be disabled, then really disable it!
+ */
+ if (!max_cpus) {
+ smp_found_config = 0;
+ printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
+ io_apic_irqs = 0;
+ cpu_online_map = cpumask_of_cpu(0);
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ disable_apic = 1;
+ goto smp_done;
+ }
+
+ connect_bsp_APIC();
+ setup_local_APIC();
+
+ if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
+ BUG();
+
+ x86_cpu_to_apicid[0] = boot_cpu_id;
+
+ /*
+ * Now scan the CPU present map and fire up the other CPUs.
+ */
+ Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
+
+ kicked = 1;
+ for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
+ apicid = cpu_present_to_apicid(bit);
+ /*
+ * Don't even attempt to start the boot CPU!
+ */
+ if (apicid == boot_cpu_id || (apicid == BAD_APICID))
+ continue;
+
+ if (!physid_isset(apicid, phys_cpu_present_map))
+ continue;
+ if (max_cpus <= cpucount+1)
+ continue;
+
+ do_boot_cpu(apicid);
+ ++kicked;
+ }
+
+ /*
+ * Cleanup possible dangling ends...
+ */
+ {
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ CMOS_WRITE(0, 0xf);
+
+ *((volatile int *) phys_to_virt(0x467)) = 0;
+ }
+
+ /*
+ * Allow the user to impress friends.
+ */
+
+ Dprintk("Before bogomips.\n");
+ if (!cpucount) {
+ printk(KERN_INFO "Only one processor found.\n");
+ } else {
+ unsigned long bogosum = 0;
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ if (cpu_isset(cpu, cpu_callout_map))
+ bogosum += cpu_data[cpu].loops_per_jiffy;
+ printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount+1,
+ bogosum/(500000/HZ),
+ (bogosum/(5000/HZ))%100);
+ Dprintk("Before bogocount - setting activated=1.\n");
+ }
+
+ /*
+ * Construct cpu_sibling_map[], so that we can tell the
+ * sibling CPU efficiently.
+ */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ cpus_clear(cpu_sibling_map[cpu]);
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ int siblings = 0;
+ int i;
+ if (!cpu_isset(cpu, cpu_callout_map))
+ continue;
+
+ if (smp_num_siblings > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ siblings++;
+ cpu_set(i, cpu_sibling_map[cpu]);
+ }
+ }
+ } else {
+ siblings++;
+ cpu_set(cpu, cpu_sibling_map[cpu]);
+ }
+
+ if (siblings != smp_num_siblings) {
+ printk(KERN_WARNING
+ "WARNING: %d siblings found for CPU%d, should be %d\n",
+ siblings, cpu, smp_num_siblings);
+ smp_num_siblings = siblings;
+ }
+ }
+
+ Dprintk("Boot done.\n");
+
+ /*
+ * Here we can be sure that there is an IO-APIC in the system. Let's
+ * go and set it up:
+ */
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+ else
+ nr_ioapics = 0;
+
+ setup_boot_APIC_clock();
+
+ /*
+ * Synchronize the TSC with the AP
+ */
+ if (cpu_has_tsc && cpucount)
+ synchronize_tsc_bp();
+
+ smp_done:
+ time_init_smp();
+}
+
+/* These are wrappers to interface to the new boot process. Someone
+ who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_boot_cpus(max_cpus);
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+ cpu_set(smp_processor_id(), cpu_online_map);
+ cpu_set(smp_processor_id(), cpu_callout_map);
+}
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+ /* This only works at boot for x86. See "rewrite" above. */
+ if (cpu_isset(cpu, smp_commenced_mask)) {
+ local_irq_enable();
+ return -ENOSYS;
+ }
+
+ /* In case one didn't come up */
+ if (!cpu_isset(cpu, cpu_callin_map)) {
+ local_irq_enable();
+ return -EIO;
+ }
+ local_irq_enable();
+
+ /* Unleash the CPU! */
+ Dprintk("waiting for cpu %d\n", cpu);
+
+ cpu_set(cpu, smp_commenced_mask);
+ while (!cpu_isset(cpu, cpu_online_map))
+ mb();
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+#ifdef CONFIG_X86_IO_APIC
+ setup_ioapic_dest();
+#endif
+ zap_low_mappings();
+}
+
--- /dev/null
+/*
+ * linux/arch/i386/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ * 1996-05-03 Ingo Molnar
+ * fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-09-05 (Various)
+ * More robust do_fast_gettimeoffset() algorithm implemented
+ * (works with APM, Cyrix 6x86MX and Centaur C6),
+ * monotonic gettimeofday() with fast_get_timeoffset(),
+ * drift-proof precision TSC calibration on boot
+ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
+ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
+ * 1998-12-16 Andrea Arcangeli
+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
+ * because it was not accounting for lost_ticks.
+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ * serialize accesses to xtime/lost_ticks).
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/bcd.h>
+#include <linux/efi.h>
+#include <linux/mca.h>
+#include <linux/sysctl.h>
+#include <linux/percpu.h>
+
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+#include <asm/delay.h>
+#include <asm/mpspec.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/timer.h>
+
+#include "mach_time.h"
+
+#include <linux/timex.h>
+#include <linux/config.h>
+
+#include <asm/hpet.h>
+
+#include <asm/arch_hooks.h>
+
+#include "io_ports.h"
+
+extern spinlock_t i8259A_lock;
+int pit_latch_buggy; /* extern */
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+#if defined(__x86_64__)
+unsigned long vxtime_hz = PIT_TICK_RATE;
+
+struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
+
+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
+struct timespec __xtime __section_xtime;
+struct timezone __sys_tz __section_sys_tz;
+
+static inline void rdtscll_sync(unsigned long *tsc)
+{
+#ifdef CONFIG_SMP
+ sync_core();
+#endif
+ rdtscll(*tsc);
+}
+#endif
+
+u32 cpu_khz; /* Detected as we calibrate the TSC */
+
+extern unsigned long wall_jiffies;
+
+spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+
+DEFINE_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
+
+extern struct init_timer_opts timer_tsc_init;
+extern struct timer_opts timer_tsc;
+struct timer_opts *cur_timer = &timer_tsc;
+
+/* These are periodically updated in shared_info, and then copied here. */
+u32 shadow_tsc_stamp;
+u64 shadow_system_time;
+static u32 shadow_time_version;
+static struct timeval shadow_tv;
+
+/*
+ * We use this to ensure that gettimeofday() is monotonically increasing. We
+ * only break this guarantee if the wall clock jumps backwards "a long way".
+ */
+static struct timeval last_seen_tv = {0,0};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+/* Periodically propagate synchronised time base to the RTC and to Xen. */
+static long last_rtc_update, last_update_to_xen;
+#endif
+
+/* Periodically take synchronised time base from Xen, if we need it. */
+static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
+
+/* Keep track of last time we did processing/updating of jiffies and xtime. */
+u64 processed_system_time; /* System time (ns) at last processing. */
+DEFINE_PER_CPU(u64, processed_system_time);
+
+#define NS_PER_TICK (1000000000ULL/HZ)
+
+#define HANDLE_USEC_UNDERFLOW(_tv) do { \
+ while ((_tv).tv_usec < 0) { \
+ (_tv).tv_usec += USEC_PER_SEC; \
+ (_tv).tv_sec--; \
+ } \
+} while (0)
+#define HANDLE_USEC_OVERFLOW(_tv) do { \
+ while ((_tv).tv_usec >= USEC_PER_SEC) { \
+ (_tv).tv_usec -= USEC_PER_SEC; \
+ (_tv).tv_sec++; \
+ } \
+} while (0)
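+/*
+ * Example (illustrative): { tv_sec = 10, tv_usec = -300000 } normalizes
+ * to { tv_sec = 9, tv_usec = 700000 } via HANDLE_USEC_UNDERFLOW().
+ */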
+static inline void __normalize_time(time_t *sec, s64 *nsec)
+{
+ while (*nsec >= NSEC_PER_SEC) {
+ (*nsec) -= NSEC_PER_SEC;
+ (*sec)++;
+ }
+ while (*nsec < 0) {
+ (*nsec) += NSEC_PER_SEC;
+ (*sec)--;
+ }
+}
+
+/* Does this guest OS track Xen time, or set its wall clock independently? */
+static int independent_wallclock = 0;
+static int __init __independent_wallclock(char *str)
+{
+ independent_wallclock = 1;
+ return 1;
+}
+__setup("independent_wallclock", __independent_wallclock);
+#define INDEPENDENT_WALLCLOCK() \
+ (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area. Must be called with the xtime_lock held for writing.
+ */
+static void __get_time_values_from_xen(void)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+
+ do {
+ shadow_time_version = s->time_version2;
+ rmb();
+ shadow_tv.tv_sec = s->wc_sec;
+ shadow_tv.tv_usec = s->wc_usec;
+ shadow_tsc_stamp = (u32)s->tsc_timestamp;
+ shadow_system_time = s->system_time;
+ rmb();
+ }
+ while (shadow_time_version != s->time_version1);
+
+ cur_timer->mark_offset();
+}
+
+#define TIME_VALUES_UP_TO_DATE \
+ ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
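+
+/*
+ * The two version fields behave like a lock-free seqlock: Xen is
+ * assumed to bump time_version1 before updating the time fields and to
+ * set time_version2 equal to it afterwards. A reader that snapshots
+ * time_version2, copies the fields, and still sees the snapshot equal
+ * to time_version1 (as __get_time_values_from_xen() does above) knows
+ * no update raced with the copy.
+ */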
+
+/*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long seq;
+ unsigned long usec, sec;
+ unsigned long max_ntp_tick;
+ unsigned long flags;
+ s64 nsec;
+
+ do {
+ unsigned long lost;
+
+ seq = read_seqbegin(&xtime_lock);
+
+ usec = cur_timer->get_offset();
+ lost = jiffies - wall_jiffies;
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0)) {
+ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+ usec = min(usec, max_ntp_tick);
+
+ if (lost)
+ usec += lost * max_ntp_tick;
+ }
+ else if (unlikely(lost))
+ usec += lost * (USEC_PER_SEC / HZ);
+
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / NSEC_PER_USEC);
+
+ nsec = shadow_system_time - processed_system_time;
+ __normalize_time(&sec, &nsec);
+ usec += (long)nsec / NSEC_PER_USEC;
+
+ if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+ /*
+ * We may have blocked for a long time,
+ * rendering our calculations invalid
+ * (e.g. the time delta may have
+ * overflowed). Detect that and recalculate
+ * with fresh values.
+ */
+ write_seqlock_irqsave(&xtime_lock, flags);
+ __get_time_values_from_xen();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ continue;
+ }
+ } while (read_seqretry(&xtime_lock, seq));
+
+ while (usec >= USEC_PER_SEC) {
+ usec -= USEC_PER_SEC;
+ sec++;
+ }
+
+ /* Ensure that time-of-day is monotonically increasing. */
+ if ((sec < last_seen_tv.tv_sec) ||
+ ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
+ sec = last_seen_tv.tv_sec;
+ usec = last_seen_tv.tv_usec;
+ } else {
+ last_seen_tv.tv_sec = sec;
+ last_seen_tv.tv_usec = usec;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec;
+ s64 nsec;
+ struct timespec xentime;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (!INDEPENDENT_WALLCLOCK())
+ return 0; /* Silent failure? */
+
+ write_seqlock_irq(&xtime_lock);
+
+ /*
+ * Ensure we don't get blocked so long that our time delta overflows.
+ * If that were to happen then our shadow time values would be stale,
+ * so we retry with fresh ones.
+ */
+ again:
+ nsec = (s64)tv->tv_nsec -
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
+ if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+ __get_time_values_from_xen();
+ goto again;
+ }
+
+ __normalize_time(&sec, &nsec);
+ set_normalized_timespec(&xentime, sec, nsec);
+
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * made, and then undo it!
+ */
+ nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+ nsec -= (shadow_system_time - processed_system_time);
+
+ __normalize_time(&sec, &nsec);
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+
+ /* Reset all our running time counts. They make no sense now. */
+ last_seen_tv.tv_sec = 0;
+ last_update_from_xen = 0;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ dom0_op_t op;
+ last_rtc_update = last_update_to_xen = 0;
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = xentime.tv_sec;
+ op.u.settime.usecs = xentime.tv_nsec / NSEC_PER_USEC;
+ op.u.settime.system_time = shadow_system_time;
+ write_sequnlock_irq(&xtime_lock);
+ HYPERVISOR_dom0_op(&op);
+ } else
+#endif
+ write_sequnlock_irq(&xtime_lock);
+
+ clock_was_set();
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int retval;
+
+ /* gets recalled with irq locally disabled */
+ spin_lock(&rtc_lock);
+ if (efi_enabled)
+ retval = efi_set_rtc_mmss(nowtime);
+ else
+ retval = mach_set_rtc_mmss(nowtime);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+#endif
+
+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
+ * Note: This function is required to return accurate
+ * time even in the absence of multiple timer ticks.
+ */
+unsigned long long monotonic_clock(void)
+{
+ return cur_timer->monotonic_clock();
+}
+EXPORT_SYMBOL(monotonic_clock);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
+
+ if (in_lock_functions(pc))
+ return *(unsigned long *)(regs->ebp + 4);
+
+ return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ time_t wtm_sec, sec;
+ s64 delta, nsec;
+ long sec_diff, wtm_nsec;
+
+ do {
+ __get_time_values_from_xen();
+
+ delta = (s64)(shadow_system_time +
+ ((s64)cur_timer->get_offset() *
+ (s64)NSEC_PER_USEC) -
+ processed_system_time);
+ }
+ while (!TIME_VALUES_UP_TO_DATE);
+
+ if (unlikely(delta < 0)) {
+ printk("Timer ISR: Time went backwards: %lld %lld %lld %lld\n",
+ delta, shadow_system_time,
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
+ processed_system_time);
+ return;
+ }
+
+ /* Process elapsed jiffies since last call. */
+ while (delta >= NS_PER_TICK) {
+ delta -= NS_PER_TICK;
+ processed_system_time += NS_PER_TICK;
+ do_timer(regs);
+ update_process_times(user_mode(regs));
+ if (regs)
+ profile_tick(CPU_PROFILING, regs);
+ }
+
+ /*
+ * Take synchronised time from Xen once a minute if we're not
+ * synchronised ourselves, and we haven't chosen to keep an independent
+ * time base.
+ */
+ if (!INDEPENDENT_WALLCLOCK() &&
+ ((time_status & STA_UNSYNC) != 0) &&
+ (xtime.tv_sec > (last_update_from_xen + 60))) {
+ /* Adjust shadow for jiffies that haven't updated xtime yet. */
+ shadow_tv.tv_usec -=
+ (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
+ HANDLE_USEC_UNDERFLOW(shadow_tv);
+
+ /*
+ * Reset our running time counts if they are invalidated by
+ * a warp backwards of more than 500ms.
+ */
+ sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
+ if (unlikely(abs(sec_diff) > 1) ||
+ unlikely(((sec_diff * USEC_PER_SEC) +
+ (xtime.tv_nsec / NSEC_PER_USEC) -
+ shadow_tv.tv_usec) > 500000)) {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ last_rtc_update = last_update_to_xen = 0;
+#endif
+ last_seen_tv.tv_sec = 0;
+ }
+
+ /* Update our unsynchronised xtime appropriately. */
+ sec = shadow_tv.tv_sec;
+ nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+
+ __normalize_time(&sec, &nsec);
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ last_update_from_xen = sec;
+ }
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (!(xen_start_info.flags & SIF_INITDOMAIN))
+ return;
+
+ /* Send synchronised time to Xen approximately every minute. */
+ if (((time_status & STA_UNSYNC) == 0) &&
+ (xtime.tv_sec > (last_update_to_xen + 60))) {
+ dom0_op_t op;
+ struct timeval tv;
+
+ tv.tv_sec = xtime.tv_sec;
+ tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
+ tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
+ HANDLE_USEC_OVERFLOW(tv);
+
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = tv.tv_sec;
+ op.u.settime.usecs = tv.tv_usec;
+ op.u.settime.system_time = shadow_system_time;
+ HYPERVISOR_dom0_op(&op);
+
+ last_update_to_xen = xtime.tv_sec;
+ }
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ (xtime.tv_nsec / 1000)
+ >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+ (xtime.tv_nsec / 1000)
+ <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
+ /* horrible...FIXME */
+ if (efi_enabled) {
+ if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600;
+ } else if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+ }
+#endif
+}
+
+/*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because, as just said, we have irqs
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
+ do_timer_interrupt(irq, NULL, regs);
+ write_sequnlock(&xtime_lock);
+ return IRQ_HANDLED;
+}
+
+/* not static: needed by APM */
+unsigned long get_cmos_time(void)
+{
+ unsigned long retval;
+
+ spin_lock(&rtc_lock);
+
+ if (efi_enabled)
+ retval = efi_get_time();
+ else
+ retval = mach_get_cmos_time();
+
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+
+static long clock_cmos_diff, sleep_start;
+
+static int timer_suspend(struct sys_device *dev, u32 state)
+{
+ /*
+ * Estimate time zone so that set_time can update the clock
+ */
+ clock_cmos_diff = -get_cmos_time();
+ clock_cmos_diff += get_seconds();
+ sleep_start = get_cmos_time();
+ return 0;
+}
+
+static int timer_resume(struct sys_device *dev)
+{
+ unsigned long flags;
+ unsigned long sec;
+ unsigned long sleep_length;
+
+#ifdef CONFIG_HPET_TIMER
+ if (is_hpet_enabled())
+ hpet_reenable();
+#endif
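+	/*
+	 * Restore wall time from the CMOS clock plus the offset recorded
+	 * at suspend, and account the suspended interval in jiffies.
+	 */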
+ sec = get_cmos_time() + clock_cmos_diff;
+ sleep_length = (get_cmos_time() - sleep_start) * HZ;
+ write_seqlock_irqsave(&xtime_lock, flags);
+ xtime.tv_sec = sec;
+ xtime.tv_nsec = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ jiffies += sleep_length;
+ wall_jiffies += sleep_length;
+ return 0;
+}
+
+static struct sysdev_class timer_sysclass = {
+ .resume = timer_resume,
+ .suspend = timer_suspend,
+ set_kset_name("timer"),
+};
+
+
+/* XXX this driverfs stuff should probably go elsewhere later -john */
+static struct sys_device device_timer = {
+ .id = 0,
+ .cls = &timer_sysclass,
+};
+
+static int time_init_device(void)
+{
+ int error = sysdev_class_register(&timer_sysclass);
+ if (!error)
+ error = sysdev_register(&device_timer);
+ return error;
+}
+
+device_initcall(time_init_device);
+
+#ifdef CONFIG_HPET_TIMER
+extern void (*late_time_init)(void);
+/* Duplicate of time_init() below, with hpet_enable part added */
+void __init hpet_time_init(void)
+{
+ xtime.tv_sec = get_cmos_time();
+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ if (hpet_enable() >= 0) {
+ printk("Using HPET for base-timer\n");
+ }
+
+ cur_timer = select_timer();
+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+ time_init_hook();
+}
+#endif
+
+/* Dynamically-mapped IRQ. */
+static int TIMER_IRQ;
+
+static struct irqaction irq_timer = {
+ timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
+ NULL, NULL
+};
+
+void __init time_init(void)
+{
+#ifdef CONFIG_HPET_TIMER
+ if (is_hpet_capable()) {
+ /*
+ * HPET initialization needs to do memory-mapped io. So, let
+ * us do a late initialization after mem_init().
+ */
+ late_time_init = hpet_time_init;
+ return;
+ }
+#endif
+ __get_time_values_from_xen();
+ xtime.tv_sec = shadow_tv.tv_sec;
+ xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+ processed_system_time = shadow_system_time;
+
+ if (timer_tsc_init.init(NULL) != 0)
+ BUG();
+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+#if defined(__x86_64__)
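+	/*
+	 * The vxtime quotients are 32.32 fixed point: cpu_khz is TSC
+	 * ticks per millisecond, so tsc_quot is 2^32 times the number
+	 * of microseconds per TSC tick, and the vsyscall path computes
+	 * usec = (tsc_delta * tsc_quot) >> 32.
+	 */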
+ vxtime.mode = VXTIME_TSC;
+ vxtime.quot = (1000000L << 32) / vxtime_hz;
+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+ vxtime.hz = vxtime_hz;
+ rdtscll_sync(&vxtime.last_tsc);
+#endif
+
+ TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
+
+ (void)setup_irq(TIMER_IRQ, &irq_timer);
+}
+
+/* Convert jiffies to system time. Call with xtime_lock held for reading. */
+static inline u64 __jiffies_to_st(unsigned long j)
+{
+ return processed_system_time + ((j - jiffies) * NS_PER_TICK);
+}
+
+/*
+ * This function works out when the next timer function has to be
+ * executed (by looking at the timer list) and sets the Xen one-shot
+ * domain timer to the appropriate value. This is typically called in
+ * cpu_idle() before the domain blocks.
+ *
+ * The function returns a non-0 value on error conditions.
+ *
+ * It must be called with interrupts disabled.
+ */
+int set_timeout_timer(void)
+{
+ u64 alarm = 0;
+ int ret = 0;
+#ifdef CONFIG_SMP
+ unsigned long seq;
+#endif
+
+ /*
+ * This is safe against long blocking (since calculations are
+ * not based on TSC deltas). It is also safe against warped
+ * system time since suspend-resume is cooperative and we
+ * would first get locked out.
+ */
+#ifdef CONFIG_SMP
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ if (smp_processor_id())
+ alarm = __jiffies_to_st(jiffies + 1);
+ else
+			alarm = __jiffies_to_st(next_timer_interrupt());
+ } while (read_seqretry(&xtime_lock, seq));
+#else
+ alarm = __jiffies_to_st(next_timer_interrupt());
+#endif
+
+ /* Failure is pretty bad, but we'd best soldier on. */
+ if ( HYPERVISOR_set_timer_op(alarm) != 0 )
+ ret = -1;
+
+ return ret;
+}
+
+void time_suspend(void)
+{
+ /* nothing */
+}
+
+/* No locking required. We are only CPU running, and interrupts are off. */
+void time_resume(void)
+{
+ if (timer_tsc_init.init(NULL) != 0)
+ BUG();
+
+ /* Get timebases for new environment. */
+ __get_time_values_from_xen();
+
+ /* Reset our own concept of passage of system time. */
+ processed_system_time = shadow_system_time;
+
+ /* Accept a warp in UTC (wall-clock) time. */
+ last_seen_tv.tv_sec = 0;
+
+ /* Make sure we resync UTC time with Xen on next timer interrupt. */
+ last_update_from_xen = 0;
+}
+
+#ifdef CONFIG_SMP
+#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
+
+static irqreturn_t local_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ s64 delta;
+ int cpu = smp_processor_id();
+
+ do {
+ __get_time_values_from_xen();
+
+ delta = (s64)(shadow_system_time +
+ ((s64)cur_timer->get_offset() *
+ (s64)NSEC_PER_USEC) -
+ per_cpu(processed_system_time, cpu));
+ }
+ while (!TIME_VALUES_UP_TO_DATE);
+
+ if (unlikely(delta < 0)) {
+ printk("Timer ISR/%d: Time went backwards: %lld %lld %lld %lld\n",
+ cpu, delta, shadow_system_time,
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
+		       per_cpu(processed_system_time, cpu));
+ return IRQ_HANDLED;
+ }
+
+ /* Process elapsed jiffies since last call. */
+ while (delta >= NS_PER_TICK) {
+ delta -= NS_PER_TICK;
+ per_cpu(processed_system_time, cpu) += NS_PER_TICK;
+ if (regs)
+ update_process_times(user_mode(regs));
+#if 0
+ if (regs)
+ profile_tick(CPU_PROFILING, regs);
+#endif
+ }
+
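+	/* CPU0 is driven by timer_interrupt(); taking the per-CPU timer
+	   VIRQ here on the boot CPU indicates a binding error. */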
+ if (smp_processor_id() == 0) {
+ xxprint("bug bug\n");
+ BUG();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction local_irq_timer = {
+ local_timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "ltimer",
+ NULL, NULL
+};
+
+void local_setup_timer(void)
+{
+ int seq, time_irq;
+ int cpu = smp_processor_id();
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ per_cpu(processed_system_time, cpu) = shadow_system_time;
+ } while (read_seqretry(&xtime_lock, seq));
+
+ time_irq = bind_virq_to_irq(VIRQ_TIMER);
+ (void)setup_irq(time_irq, &local_irq_timer);
+}
+#endif
+
+/*
+ * /proc/sys/xen: This really belongs in another file. It can stay here for
+ * now however.
+ */
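+/*
+ * Illustrative usage: a domain that should keep its wall clock
+ * independent of Xen's can run
+ *	echo 1 > /proc/sys/xen/independent_wallclock
+ * to set the flag consulted by the wall-clock resync logic.
+ */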
+static ctl_table xen_subtable[] = {
+ {1, "independent_wallclock", &independent_wallclock,
+ sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
+ {0}
+};
+static ctl_table xen_table[] = {
+ {123, "xen", NULL, 0, 0555, xen_subtable},
+ {0}
+};
+static int __init xen_sysctl_init(void)
+{
+ (void)register_sysctl_table(xen_table, 0);
+ return 0;
+}
+__initcall(xen_sysctl_init);
--- /dev/null
+/*
+ * linux/arch/x86-64/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
+#include <asm/kdebug.h>
+#include <asm/processor.h>
+
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/pda.h>
+#include <asm/proto.h>
+#include <asm/nmi.h>
+
+#include <linux/irq.h>
+
+
+extern struct gate_struct idt_table[256];
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void double_fault(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void reserved(void);
+asmlinkage void alignment_check(void);
+asmlinkage void machine_check(void);
+asmlinkage void spurious_interrupt_bug(void);
+asmlinkage void call_debug(void);
+
+struct notifier_block *die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+}
+
+static inline void conditional_sti(struct pt_regs *regs)
+{
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
+}
+
+static int kstack_depth_to_print = 10;
+
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+int printk_address(unsigned long address)
+{
+ unsigned long offset = 0, symsize;
+ const char *symname;
+ char *modname;
+ char *delim = ":";
+ char namebuf[128];
+
+ symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
+ if (!symname)
+ return printk("[<%016lx>]", address);
+ if (!modname)
+ modname = delim = "";
+ return printk("<%016lx>{%s%s%s%s%+ld}",
+ address,delim,modname,delim,symname,offset);
+}
+#else
+int printk_address(unsigned long address)
+{
+ return printk("[<%016lx>]", address);
+}
+#endif
+
+unsigned long *in_exception_stack(int cpu, unsigned long stack)
+{
+ int k;
+ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ unsigned long end = tss->ist[k] + EXCEPTION_STKSZ;
+
+ if (stack >= tss->ist[k] && stack <= end)
+ return (unsigned long *)end;
+ }
+ return NULL;
+}
+
+/*
+ * x86-64 can have up to three kernel stacks:
+ * process stack
+ * interrupt stack
+ * severe exception (double fault, nmi, stack fault) hardware stack
+ * Check and process them in order.
+ */
+
+void show_trace(unsigned long *stack)
+{
+ unsigned long addr;
+ unsigned long *irqstack, *irqstack_end, *estack_end;
+ const int cpu = safe_smp_processor_id();
+ int i;
+
+ printk("\nCall Trace:");
+ i = 0;
+
+ estack_end = in_exception_stack(cpu, (unsigned long)stack);
+ if (estack_end) {
+ while (stack < estack_end) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ i += printk_address(addr);
+ i += printk(" ");
+ if (i > 50) {
+ printk("\n");
+ i = 0;
+ }
+ }
+ }
+ i += printk(" <EOE> ");
+ i += 7;
+ stack = (unsigned long *) estack_end[-2];
+ }
+
+ irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
+ irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE + 64);
+
+ if (stack >= irqstack && stack < irqstack_end) {
+ printk("<IRQ> ");
+ while (stack < irqstack_end) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+ i += printk_address(addr);
+ i += printk(" ");
+ if (i > 50) {
+ printk("\n ");
+ i = 0;
+ }
+ }
+ }
+ stack = (unsigned long *) (irqstack_end[-1]);
+ printk(" <EOI> ");
+ i += 7;
+ }
+
+ while (((long) stack & (THREAD_SIZE-1)) != 0) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ i += printk_address(addr);
+ i += printk(" ");
+ if (i > 50) {
+ printk("\n ");
+ i = 0;
+ }
+ }
+ }
+ printk("\n");
+}
+
+void show_stack(struct task_struct *tsk, unsigned long * rsp)
+{
+ unsigned long *stack;
+ int i;
+ const int cpu = safe_smp_processor_id();
+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
+ unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);
+
+ // debugging aid: "show_stack(NULL, NULL);" prints the
+ // back trace for this cpu.
+
+ if (rsp == NULL) {
+ if (tsk)
+ rsp = (unsigned long *)tsk->thread.rsp;
+ else
+ rsp = (unsigned long *)&rsp;
+ }
+
+ stack = rsp;
+ for(i=0; i < kstack_depth_to_print; i++) {
+ if (stack >= irqstack && stack <= irqstack_end) {
+ if (stack == irqstack_end) {
+ stack = (unsigned long *) (irqstack_end[-1]);
+ printk(" <EOI> ");
+ }
+ } else {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ }
+ if (i && ((i % 4) == 0))
+ printk("\n ");
+ printk("%016lx ", *stack++);
+ }
+ show_trace((unsigned long *)rsp);
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ unsigned long dummy;
+ show_trace(&dummy);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_registers(struct pt_regs *regs)
+{
+ int i;
+ int in_kernel = (regs->cs & 3) == 0;
+ unsigned long rsp;
+ const int cpu = safe_smp_processor_id();
+ struct task_struct *cur = cpu_pda[cpu].pcurrent;
+
+ rsp = regs->rsp;
+
+ printk("CPU %d ", cpu);
+ __show_regs(regs);
+ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
+ cur->comm, cur->pid, cur->thread_info, cur);
+
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+
+ printk("Stack: ");
+ show_stack(NULL, (unsigned long*)rsp);
+
+ printk("\nCode: ");
+ if(regs->rip < PAGE_OFFSET)
+ goto bad;
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
+ if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
+bad:
+ printk(" Bad RIP value.");
+ break;
+ }
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+}
+
+void handle_BUG(struct pt_regs *regs)
+{
+ struct bug_frame f;
+ char tmp;
+
+ if (regs->cs & 3)
+ return;
+ if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
+ sizeof(struct bug_frame)))
+ return;
+ if ((unsigned long)f.filename < __PAGE_OFFSET ||
+ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
+ return;
+ if (__get_user(tmp, f.filename))
+ f.filename = "unmapped filename";
+ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
+ printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
+}
+
+void out_of_line_bug(void)
+{
+ BUG();
+}
+
+static DEFINE_SPINLOCK(die_lock);
+static int die_owner = -1;
+
+void oops_begin(void)
+{
+ int cpu = safe_smp_processor_id();
+ /* racy, but better than risking deadlock. */
+ local_irq_disable();
+ if (!spin_trylock(&die_lock)) {
+ if (cpu == die_owner)
+ /* nested oops. should stop eventually */;
+ else
+ spin_lock(&die_lock);
+ }
+ die_owner = cpu;
+ console_verbose();
+ bust_spinlocks(1);
+}
+
+void oops_end(void)
+{
+ die_owner = -1;
+ bust_spinlocks(0);
+ spin_unlock(&die_lock);
+ local_irq_enable(); /* make sure back scroll still works */
+ if (panic_on_oops)
+ panic("Oops");
+}
+
+void __die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+ printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+#endif
+ printk("\n");
+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ show_registers(regs);
+ /* Executive summary in case the oops scrolled away */
+ printk(KERN_ALERT "RIP ");
+ printk_address(regs->rip);
+ printk(" RSP <%016lx>\n", regs->rsp);
+}
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ oops_begin();
+ handle_BUG(regs);
+ __die(str, regs, err);
+ oops_end();
+ do_exit(SIGSEGV);
+}
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
+ die(str, regs, err);
+}
+
+#if 0
+void die_nmi(char *str, struct pt_regs *regs)
+{
+ oops_begin();
+ /*
+ * We are in trouble anyway, lets at least try
+ * to get a message out.
+ */
+ printk(str, safe_smp_processor_id());
+ show_registers(regs);
+ if (panic_on_timeout || panic_on_oops)
+ panic("nmi watchdog");
+ printk("console shuts up ...\n");
+ oops_end();
+ do_exit(SIGSEGV);
+}
+#endif
+
+static void do_trap(int trapnr, int signr, char *str,
+ struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+ conditional_sti(regs);
+
+#ifdef CONFIG_CHECKING
+ {
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
+ regs->rip);
+ }
+ }
+#endif
+
+ if ((regs->cs & 3) != 0) {
+ struct task_struct *tsk = current;
+
+ if (exception_trace && unhandled_signal(tsk, signr))
+ printk(KERN_INFO
+ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
+ tsk->comm, tsk->pid, str,
+ regs->rip,regs->rsp,error_code);
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+ force_sig(signr, tsk);
+ return;
+ }
+
+
+ /* kernel trap */
+ {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ } else
+ die(str, regs, error_code);
+ return;
+ }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, regs, error_code, &info); \
+}
+
+DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR(18, SIGSEGV, "reserved", reserved)
+
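+/*
+ * Traps 8 (double fault) and 12 (stack segment) arrive on their own
+ * exception stack. If the trap came from user mode, copy the saved
+ * registers onto the normal per-process kernel stack so the common
+ * trap code finds them in the usual place.
+ */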
+#define DO_ERROR_STACK(trapnr, signr, str, name) \
+asmlinkage void *do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ struct pt_regs *pr = ((struct pt_regs *)(current->thread.rsp0))-1; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return regs; \
+ if (regs->cs & 3) { \
+ memcpy(pr, regs, sizeof(struct pt_regs)); \
+ regs = pr; \
+ } \
+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
+ return regs; \
+}
+
+DO_ERROR_STACK(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR_STACK( 8, SIGSEGV, "double fault", double_fault)
+
+asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+{
+ conditional_sti(regs);
+
+#ifdef CONFIG_CHECKING
+ {
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ oops_in_progress++;
+ printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
+ oops_in_progress--;
+ }
+ }
+#endif
+
+ if ((regs->cs & 3)!=0) {
+ struct task_struct *tsk = current;
+
+ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
+ printk(KERN_INFO
+ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
+ tsk->comm, tsk->pid,
+ regs->rip,regs->rsp,error_code);
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
+
+ /* kernel gp */
+ {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return;
+ }
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die("general protection fault", regs, error_code);
+ }
+}
+
+#if 0
+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+{
+ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+ printk("You probably have a hardware problem with your RAM chips\n");
+
+ /* Clear and disable the memory parity error line. */
+ reason = (reason & 0xf) | 4;
+ outb(reason, 0x61);
+}
+
+static void io_check_error(unsigned char reason, struct pt_regs * regs)
+{
+ printk("NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+ mdelay(2000);
+ reason &= ~8;
+ outb(reason, 0x61);
+}
+
+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+{
+	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
+ printk("Dazed and confused, but trying to continue\n");
+ printk("Do you have a strange power saving mode enabled?\n");
+}
+
+asmlinkage void default_do_nmi(struct pt_regs *regs)
+{
+ unsigned char reason = 0;
+
+ /* Only the BSP gets external NMIs from the system. */
+ if (!smp_processor_id())
+ reason = get_nmi_reason();
+
+ if (!(reason & 0xc0)) {
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ == NOTIFY_STOP)
+ return;
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+ if (nmi_watchdog > 0) {
+ nmi_watchdog_tick(regs,reason);
+ return;
+ }
+#endif
+ unknown_nmi_error(reason, regs);
+ return;
+ }
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ return;
+
+ /* AK: following checks seem to be broken on modern chipsets. FIXME */
+
+ if (reason & 0x80)
+ mem_parity_error(reason, regs);
+ if (reason & 0x40)
+ io_check_error(reason, regs);
+
+ /*
+ * Reassert NMI in case it became active meanwhile
+ * as it's edge-triggered.
+ */
+ outb(0x8f, 0x70);
+ inb(0x71); /* dummy */
+ outb(0x0f, 0x70);
+ inb(0x71); /* dummy */
+}
+#endif
+
+asmlinkage void do_int3(struct pt_regs * regs, long error_code)
+{
+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
+ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+ return;
+}
+
+/* runs on IST stack. */
+asmlinkage void *do_debug(struct pt_regs * regs, unsigned long error_code)
+{
+ struct pt_regs *pr;
+ unsigned long condition;
+ struct task_struct *tsk = current;
+ siginfo_t info;
+
+ pr = (struct pt_regs *)(current->thread.rsp0)-1;
+ if (regs->cs & 3) {
+ memcpy(pr, regs, sizeof(struct pt_regs));
+ regs = pr;
+ }
+
+#ifdef CONFIG_CHECKING
+ {
+ /* RED-PEN interaction with debugger - could destroy gs */
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
+ }
+ }
+#endif
+
+ asm("movq %%db6,%0" : "=r" (condition));
+
+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ SIGTRAP) == NOTIFY_STOP) {
+ return regs;
+ }
+ conditional_sti(regs);
+
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+ if (!tsk->thread.debugreg7) {
+ goto clear_dr7;
+ }
+ }
+
+ tsk->thread.debugreg6 = condition;
+
+ /* Mask out spurious TF errors due to lazy TF clearing */
+ if ((condition & DR_STEP) &&
+ (notify_die(DIE_DEBUGSTEP, "debugstep", regs, condition,
+ 1, SIGTRAP) != NOTIFY_STOP)) {
+ /*
+ * The TF error should be masked out only if the current
+ * process is not traced and if the TRAP flag has been set
+ * previously by a tracing process (condition detected by
+ * the PT_DTRACE flag); remember that the i386 TRAP flag
+ * can be modified by the process itself in user mode,
+ * allowing programs to debug themselves without the ptrace()
+ * interface.
+ */
+ if ((regs->cs & 3) == 0)
+ goto clear_TF_reenable;
+ if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
+ goto clear_TF;
+ }
+
+ /* Ok, finally something we can handle */
+ tsk->thread.trap_no = 1;
+ tsk->thread.error_code = error_code;
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ if ((regs->cs & 3) == 0)
+ goto clear_dr7;
+
+ info.si_addr = (void __user *)regs->rip;
+ force_sig_info(SIGTRAP, &info, tsk);
+clear_dr7:
+ asm volatile("movq %0,%%db7"::"r"(0UL));
+ notify_die(DIE_DEBUG, "debug", regs, condition, 1, SIGTRAP);
+ return regs;
+
+clear_TF_reenable:
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+
+clear_TF:
+ /* RED-PEN could cause spurious errors */
+ if (notify_die(DIE_DEBUG, "debug2", regs, condition, 1, SIGTRAP)
+ != NOTIFY_STOP)
+ regs->eflags &= ~TF_MASK;
+ return regs;
+}
+
+static int kernel_math_error(struct pt_regs *regs, char *str)
+{
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return 1;
+ }
+ notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
+#if 0
+ /* This should be a die, but warn only for now */
+ die(str, regs, 0);
+#else
+ printk(KERN_DEBUG "%s: %s at ", current->comm, str);
+ printk_address(regs->rip);
+ printk("\n");
+#endif
+ return 0;
+}
+
+/*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour
+ */
+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
+{
+ void __user *rip = (void __user *)(regs->rip);
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short cwd, swd;
+
+ conditional_sti(regs);
+ if ((regs->cs & 3) == 0 &&
+ kernel_math_error(regs, "kernel x87 math error"))
+ return;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 16;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = rip;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
+ switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x041: /* Stack Fault */
+ case 0x241: /* Stack Fault | Direction */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void bad_intr(void)
+{
+ printk("bad interrupt");
+}
+
+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
+{
+ void __user *rip = (void __user *)(regs->rip);
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short mxcsr;
+
+ conditional_sti(regs);
+ if ((regs->cs & 3) == 0 &&
+ kernel_math_error(regs, "simd math error"))
+ return;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 19;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = rip;
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ mxcsr = get_fpu_mxcsr(task);
+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
+{
+}
+
+#if 0
+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+{
+}
+#endif
+
+/*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ */
+asmlinkage void math_state_restore(void)
+{
+ struct task_struct *me = current;
+ clts(); /* Allow maths ops (or we recurse) */
+
+ if (!used_math())
+ init_fpu(me);
+ restore_fpu_checking(&me->thread.i387.fxsave);
+ me->thread_info->status |= TS_USEDFPU;
+}
+
+void do_call_debug(struct pt_regs *regs)
+{
+ notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
+}
+
+
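+/*
+ * Instead of installing a native IDT, register the handlers with Xen.
+ * The selector is or'd with 3 because the guest kernel runs in ring 3
+ * on x86-64 Xen; the second field is the privilege level from which
+ * the trap may be raised (3 for int3/overflow/bounds so user code can
+ * trigger them directly).
+ */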
+static trap_info_t trap_table[] = {
+ { 0, 0, (__KERNEL_CS|0x3), 0, (unsigned long)divide_error },
+ { 1, 0, (__KERNEL_CS|0x3), 0, (unsigned long)debug },
+ { 3, 3, (__KERNEL_CS|0x3), 0, (unsigned long)int3 },
+ { 4, 3, (__KERNEL_CS|0x3), 0, (unsigned long)overflow },
+ { 5, 3, (__KERNEL_CS|0x3), 0, (unsigned long)bounds },
+ { 6, 0, (__KERNEL_CS|0x3), 0, (unsigned long)invalid_op },
+ { 7, 0, (__KERNEL_CS|0x3), 0, (unsigned long)device_not_available },
+ { 9, 0, (__KERNEL_CS|0x3), 0, (unsigned long)coprocessor_segment_overrun},
+ { 10, 0, (__KERNEL_CS|0x3), 0, (unsigned long)invalid_TSS },
+ { 11, 0, (__KERNEL_CS|0x3), 0, (unsigned long)segment_not_present },
+ { 12, 0, (__KERNEL_CS|0x3), 0, (unsigned long)stack_segment },
+ { 13, 0, (__KERNEL_CS|0x3), 0, (unsigned long)general_protection },
+ { 14, 0, (__KERNEL_CS|0x3), 0, (unsigned long)page_fault },
+ { 15, 0, (__KERNEL_CS|0x3), 0, (unsigned long)spurious_interrupt_bug },
+ { 16, 0, (__KERNEL_CS|0x3), 0, (unsigned long)coprocessor_error },
+ { 17, 0, (__KERNEL_CS|0x3), 0, (unsigned long)alignment_check },
+#ifdef CONFIG_X86_MCE
+ { 18, 0, (__KERNEL_CS|0x3), 0, (unsigned long)machine_check },
+#endif
+ { 19, 0, (__KERNEL_CS|0x3), 0, (unsigned long)simd_coprocessor_error },
+ { SYSCALL_VECTOR, 3, (__KERNEL_CS|0x3), 0, (unsigned long)system_call },
+ { 0, 0, 0, 0, 0 }
+};
+
+void __init trap_init(void)
+{
+ int ret;
+
+ ret = HYPERVISOR_set_trap_table(trap_table);
+
+ if (ret)
+		printk("HYPERVISOR_set_trap_table failed: error %d\n",
+ ret);
+
+#ifdef CONFIG_IA32_EMULATION
+ set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+#endif
+
+ /*
+ * Should be a barrier for any external CPU state.
+ */
+ cpu_init();
+}
+
+
+/* Actual parsing is done early in setup.c. */
+static int __init oops_dummy(char *s)
+{
+ panic_on_oops = 1;
+ return -1;
+}
+__setup("oops=", oops_dummy);
+
+static int __init kstack_setup(char *s)
+{
+ kstack_depth_to_print = simple_strtoul(s,NULL,0);
+ return 0;
+}
+__setup("kstack=", kstack_setup);
+
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/vsyscall.c
+ *
+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ * Thanks to hpa@transmeta.com for some useful hints.
+ * Special thanks to Ingo Molnar for his early experience with
+ * a different vsyscall implementation for Linux/IA32 and for the name.
+ *
+ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
+ * at virtual address -10Mbyte+1024bytes etc... There are at max 8192
+ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
+ * jumping out of line if necessary.
+ *
+ * Note: the concept clashes with user mode linux. If you use UML just
+ * set the kernel.vsyscall sysctl to 0.
+ */
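+/*
+ * Concretely: with the vsyscall area based at -10MB, slot n lives at
+ * 0xffffffffff600000 + n * 1024, so vgettimeofday (slot 0) sits at
+ * 0xffffffffff600000 and vtime (slot 1) at 0xffffffffff600400.
+ */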
+
+/*
+ * TODO 2001-03-20:
+ *
+ * 1) make page fault handler detect faults on page1-page-last of the vsyscall
+ * virtual space, and make it increase %rip and write -ENOSYS in %rax (so
+ * we'll be able to upgrade to a new glibc without upgrading kernel after
+ * we add more vsyscalls).
+ * 2) Possibly we need a fixmap table for the vsyscalls too if we want
+ * to avoid SIGSEGV and we want to return -EFAULT from the vsyscalls as well.
+ * Can we segfault inside a "syscall"? We can fix this anytime and those fixes
+ * won't be visible for userspace. Not fixing this is a no-op for correct
+ * programs; broken programs will segfault, and there's no security risk until
+ * we choose to fix it.
+ *
+ * These are not urgent things; we only need to address them before shipping
+ * the first production binary kernels.
+ */
+
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/seqlock.h>
+#include <linux/jiffies.h>
+
+#include <asm/vsyscall.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/fixmap.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define force_inline __attribute__((always_inline)) inline
+
+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
+
+#include <asm/unistd.h>
+
+static force_inline void timeval_normalize(struct timeval * tv)
+{
+ time_t __sec;
+
+ __sec = tv->tv_usec / 1000000;
+ if (__sec)
+ {
+ tv->tv_usec %= 1000000;
+ tv->tv_sec += __sec;
+ }
+}
+
+static force_inline void do_vgettimeofday(struct timeval * tv)
+{
+ long sequence, t;
+ unsigned long sec, usec;
+
+ do {
+ sequence = read_seqbegin(&__xtime_lock);
+
+ sec = __xtime.tv_sec;
+ usec = (__xtime.tv_nsec / 1000) +
+ (__jiffies - __wall_jiffies) * (1000000 / HZ);
+
+ if (__vxtime.mode == VXTIME_TSC) {
+ sync_core();
+ rdtscll(t);
+ if (t < __vxtime.last_tsc) t = __vxtime.last_tsc;
+ usec += ((t - __vxtime.last_tsc) *
+ __vxtime.tsc_quot) >> 32;
+ /* See comment in x86_64 do_gettimeofday. */
+ } else {
+ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
+ __vxtime.last) * __vxtime.quot) >> 32;
+ }
+ } while (read_seqretry(&__xtime_lock, sequence));
+
+ tv->tv_sec = sec + usec / 1000000;
+ tv->tv_usec = usec % 1000000;
+}
+
+/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
+static force_inline void do_get_tz(struct timezone * tz)
+{
+ *tz = __sys_tz;
+}
+
+
+static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ int ret;
+ asm volatile("syscall"
+ : "=a" (ret)
+ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
+ return ret;
+}
+
+static force_inline long time_syscall(long *t)
+{
+ long secs;
+ asm volatile("syscall"
+ : "=a" (secs)
+ : "0" (__NR_time),"D" (t) : __syscall_clobber);
+ return secs;
+}
+
+static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+{
+ if (unlikely(!__sysctl_vsyscall))
+ return gettimeofday(tv,tz);
+ if (tv)
+ do_vgettimeofday(tv);
+ if (tz)
+ do_get_tz(tz);
+ return 0;
+}
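+/*
+ * Illustrative only: userspace reaches vgettimeofday() without entering
+ * the kernel by calling the fixed vsyscall address directly, e.g.
+ *
+ *	struct timeval tv;
+ *	((int (*)(struct timeval *, struct timezone *))
+ *			0xffffffffff600000UL)(&tv, NULL);
+ *
+ * glibc's gettimeofday() does the equivalent on kernels of this vintage.
+ */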
+
+/* This will break when the xtime seconds get inaccurate, but that is
+ * unlikely */
+static time_t __vsyscall(1) vtime(time_t *t)
+{
+ if (unlikely(!__sysctl_vsyscall))
+ return time_syscall(t);
+ else if (t)
+ *t = __xtime.tv_sec;
+ return __xtime.tv_sec;
+}
+
+static long __vsyscall(2) venosys_0(void)
+{
+ return -ENOSYS;
+}
+
+static long __vsyscall(3) venosys_1(void)
+{
+ return -ENOSYS;
+
+}
+
+static void __init map_vsyscall(void)
+{
+ extern char __vsyscall_0;
+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+
+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+}
+
+extern void __set_fixmap_user (enum fixed_addresses, unsigned long, pgprot_t);
+
+static void __init map_vsyscall_user(void)
+{
+ extern char __vsyscall_0;
+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+
+ __set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+}
+
+static int __init vsyscall_init(void)
+{
+ BUG_ON(((unsigned long) &vgettimeofday !=
+ VSYSCALL_ADDR(__NR_vgettimeofday)));
+ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
+ map_vsyscall();
+	map_vsyscall_user(); /* establish translation for user address space */
+ sysctl_vsyscall = 1;
+
+ return 0;
+}
+
+__initcall(vsyscall_init);
--- /dev/null
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/apm_bios.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/tty.h>
+#include <linux/ioctl32.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/mmx.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/nmi.h>
+#include <asm/kdebug.h>
+#include <asm/unistd.h>
+#include <asm/tlbflush.h>
+
+extern spinlock_t rtc_lock;
+
+#ifdef CONFIG_SMP
+extern void __write_lock_failed(rwlock_t *rw);
+extern void __read_lock_failed(rwlock_t *rw);
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+extern unsigned long get_cmos_time(void);
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+//EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(pm_idle);
+// EXPORT_SYMBOL(pm_power_off);
+EXPORT_SYMBOL(get_cmos_time);
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_failed_trylock);
+EXPORT_SYMBOL(__up_wakeup);
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(ip_compute_csum);
+/* Delay loops */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(copy_user_generic);
+EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(copy_to_user);
+EXPORT_SYMBOL(copy_in_user);
+EXPORT_SYMBOL(strnlen_user);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pcibios_penalize_isa_irq);
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+
+EXPORT_SYMBOL(cpu_pda);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(__write_lock_failed);
+EXPORT_SYMBOL(__read_lock_failed);
+
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(cpu_callout_map);
+#endif
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(get_wchan);
+
+EXPORT_SYMBOL(rtc_lock);
+
+/* EXPORT_SYMBOL_GPL(set_nmi_callback);
+ EXPORT_SYMBOL_GPL(unset_nmi_callback); */
+
+/* Export string functions. We normally rely on gcc builtins for most of these,
+   but gcc sometimes decides not to inline them. */
+#undef memcpy
+#undef memset
+#undef memmove
+#undef memchr
+#undef strlen
+#undef strcpy
+#undef strncmp
+#undef strncpy
+#undef strchr
+#undef strcmp
+#undef strcat
+#undef memcmp
+
+extern void * memset(void *,int,__kernel_size_t);
+extern size_t strlen(const char *);
+extern void * memmove(void * dest,const void *src,size_t count);
+extern char * strcpy(char * dest,const char *src);
+extern int strcmp(const char * cs,const char * ct);
+extern void *memchr(const void *s, int c, size_t n);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * __memcpy(void *,const void *,__kernel_size_t);
+extern char * strcat(char *, const char *);
+extern int memcmp(const void * cs,const void * ct,size_t count);
+
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(memcmp);
+
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+/* prototypes are wrong, these are assembly with custom calling conventions */
+extern void rwsem_down_read_failed_thunk(void);
+extern void rwsem_wake_thunk(void);
+extern void rwsem_downgrade_thunk(void);
+extern void rwsem_down_write_failed_thunk(void);
+EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
+EXPORT_SYMBOL(rwsem_wake_thunk);
+EXPORT_SYMBOL(rwsem_downgrade_thunk);
+EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
+#endif
+
+EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_HAVE_DEC_LOCK
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif
+
+EXPORT_SYMBOL(die_chain);
+EXPORT_SYMBOL(register_die_notifier);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_SYMBOL(smp_num_siblings);
+#endif
+
+extern void do_softirq_thunk(void);
+EXPORT_SYMBOL(do_softirq_thunk);
+
+void out_of_line_bug(void);
+EXPORT_SYMBOL(out_of_line_bug);
+
+EXPORT_SYMBOL(init_level4_pgt);
+
+extern unsigned long __supported_pte_mask;
+EXPORT_SYMBOL(__supported_pte_mask);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL_GPL(flush_tlb_all);
+#endif
+
+EXPORT_SYMBOL(cpu_khz);
--- /dev/null
+#
+# Makefile for the linux x86_64-specific parts of the memory manager.
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/mm
+
+obj-y := init.o fault.o ioremap.o pageattr.o hypervisor.o
+c-obj-y := extable.o
+
+#obj-y := init.o fault.o ioremap.o extable.o pageattr.o
+#c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+c-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+c-obj-$(CONFIG_K8_NUMA) += k8topology.o
+
+hugetlbpage-y = ../../../i386/mm/hugetlbpage.o
+
+c-link :=
+
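+# The pattern rule below symlinks the unmodified arch/x86_64/mm sources
+# named in c-obj-y into this directory so they are built with the Xen
+# include paths and flags set above.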
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/x86_64/mm/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
--- /dev/null
+/*
+ * linux/arch/x86-64/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/kprobes.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/proto.h>
+#include <asm/kdebug.h>
+#include <asm-generic/sections.h>
+
+DEFINE_PER_CPU(pgd_t *, cur_pgd);
+
+void bust_spinlocks(int yes)
+{
+ int loglevel_save = console_loglevel;
+ if (yes) {
+ oops_in_progress = 1;
+ } else {
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+}
+
+/* Sometimes the CPU reports invalid exceptions on prefetch.
+   Check for that here and ignore them.
+   Opcode checker based on code by Richard Brunner */
+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
+{
+ unsigned char *instr = (unsigned char *)(regs->rip);
+ int scan_more = 1;
+ int prefetch = 0;
+ unsigned char *max_instr = instr + 15;
+
+	/* If it was an exec fault, ignore it */
+ if (error_code & (1<<4))
+ return 0;
+
+	/* Code segments in LDT could have a non-zero base. Don't check
+	   when that's possible */
+ if (regs->cs & (1<<2))
+ return 0;
+
+ if ((regs->cs & 3) != 0 && regs->rip >= TASK_SIZE)
+ return 0;
+
+ while (scan_more && instr < max_instr) {
+ unsigned char opcode;
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+ if (__get_user(opcode, instr))
+ break;
+
+ instr_hi = opcode & 0xf0;
+ instr_lo = opcode & 0x0f;
+ instr++;
+
+ switch (instr_hi) {
+ case 0x20:
+ case 0x30:
+ /* Values 0x26,0x2E,0x36,0x3E are valid x86
+ prefixes. In long mode, the CPU will signal
+ invalid opcode if some of these prefixes are
+ present so we will never get here anyway */
+ scan_more = ((instr_lo & 7) == 0x6);
+ break;
+
+ case 0x40:
+ /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
+ Need to figure out under what instruction mode the
+ instruction was issued ... */
+ /* Could check the LDT for lm, but for now it's good
+ enough to assume that long mode only uses well known
+ segments or kernel. */
+ scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
+ break;
+
+ case 0x60:
+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
+ scan_more = (instr_lo & 0xC) == 0x4;
+ break;
+ case 0xF0:
+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
+ scan_more = !instr_lo || (instr_lo>>1) == 1;
+ break;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+ if (__get_user(opcode, instr))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+ break;
+ default:
+ scan_more = 0;
+ break;
+ }
+ }
+ return prefetch;
+}
+
+static int bad_address(void *p)
+{
+ unsigned long dummy;
+ return __get_user(dummy, (unsigned long *)p);
+}
+
+void dump_pagetable(unsigned long address)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+ pgd += pgd_index(address);
+
+ printk("PGD %lx ", pgd_val(*pgd));
+ if (bad_address(pgd)) goto bad;
+ if (!pgd_present(*pgd)) goto ret;
+
+ pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
+ if (bad_address(pud)) goto bad;
+ printk("PUD %lx ", pud_val(*pud));
+ if (!pud_present(*pud)) goto ret;
+
+ pmd = pmd_offset(pud, address);
+ if (bad_address(pmd)) goto bad;
+ printk("PMD %lx ", pmd_val(*pmd));
+ if (!pmd_present(*pmd)) goto ret;
+
+ pte = pte_offset_kernel(pmd, address);
+ if (bad_address(pte)) goto bad;
+ printk("PTE %lx", pte_val(*pte));
+ret:
+ printk("\n");
+ return;
+bad:
+ printk("BAD\n");
+}
+
+static const char errata93_warning[] =
+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
+KERN_ERR "******* Please consider a BIOS update.\n"
+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+
+/* Workaround for K8 erratum #93 & buggy BIOS.
+ BIOS SMM functions are required to use a specific workaround
+ to avoid corruption of the 64bit RIP register on C stepping K8.
+   A lot of BIOSes that weren't tested properly miss this.
+ The OS sees this as a page fault with the upper 32bits of RIP cleared.
+ Try to work around it here.
+ Note we only handle faults in kernel here. */
+
+static int is_errata93(struct pt_regs *regs, unsigned long address)
+{
+ static int warned;
+ if (address != regs->rip)
+ return 0;
+ if ((address >> 32) != 0)
+ return 0;
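+	/* Sign-extend: the erratum cleared the upper 32 bits of RIP, so
+	   restore them to recover the canonical kernel address. */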
+ address |= 0xffffffffUL << 32;
+ if ((address >= (u64)_stext && address <= (u64)_etext) ||
+ (address >= MODULES_VADDR && address <= MODULES_END)) {
+ if (!warned) {
+ printk(errata93_warning);
+ warned = 1;
+ }
+ regs->rip = address;
+ return 1;
+ }
+ return 0;
+}
+
+int unhandled_signal(struct task_struct *tsk, int sig)
+{
+ if (tsk->pid == 1)
+ return 1;
+ /* Warn for strace, but not for gdb */
+ if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
+ (tsk->ptrace & PT_PTRACED))
+ return 0;
+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+}
+
+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code)
+{
+ oops_begin();
+ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
+ current->comm, address);
+ dump_pagetable(address);
+ __die("Bad pagetable", regs, error_code);
+ oops_end();
+ do_exit(SIGKILL);
+}
+
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ */
+static int vmalloc_fault(unsigned long address)
+{
+ pgd_t *pgd, *pgd_ref;
+ pud_t *pud, *pud_ref;
+ pmd_t *pmd, *pmd_ref;
+ pte_t *pte, *pte_ref;
+
+ /* Copy kernel mappings over when needed. This can also
+	   happen within a race in page table update. In the latter
+ case just flush. */
+
+ pgd = pgd_offset(current->mm ?: &init_mm, address);
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+
+ /* Below here mismatches are bugs because these lower tables
+ are shared */
+
+ pud = pud_offset(pgd, address);
+ pud_ref = pud_offset(pgd_ref, address);
+ if (pud_none(*pud_ref))
+ return -1;
+ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
+ BUG();
+ pmd = pmd_offset(pud, address);
+ pmd_ref = pmd_offset(pud_ref, address);
+ if (pmd_none(*pmd_ref))
+ return -1;
+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ BUG();
+ pte_ref = pte_offset_kernel(pmd_ref, address);
+ if (!pte_present(*pte_ref))
+ return -1;
+ pte = pte_offset_kernel(pmd, address);
+ if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+ BUG();
+ __flush_tlb_all();
+ return 0;
+}
+
+int page_fault_trace = 0;
+int exception_trace = 1;
+
+
+#define MEM_VERBOSE 1
+
+#ifdef MEM_VERBOSE
+#define MEM_LOG(_f, _a...) \
+ printk("fault.c:[%d]-> " _f "\n", \
+ __LINE__ , ## _a )
+#else
+#define MEM_LOG(_f, _a...) ((void)0)
+#endif
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ * bit 3 == 1 means fault was an instruction fetch
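+ *
+ * e.g. error_code == 6 (user|write) is a user-mode write to a
+ * not-present page, while 0 is a kernel read of a not-present page.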
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ const struct exception_table_entry *fixup;
+ int write;
+ siginfo_t info;
+
+ if (!user_mode(regs))
+ error_code &= ~4; /* means kernel */
+
+#ifdef CONFIG_CHECKING
+ {
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + stack_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
+ }
+ }
+#endif
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (likely(regs->eflags & X86_EFLAGS_IF))
+ local_irq_enable();
+
+ if (unlikely(page_fault_trace))
+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
+
+ tsk = current;
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * (error_code & 4) == 0, and that the fault was not a
+ * protection error (error_code & 1) == 0.
+ */
+ if (unlikely(address >= TASK_SIZE)) {
+ if (!(error_code & 5)) {
+ if (vmalloc_fault(address) < 0)
+ goto bad_area_nosemaphore;
+ return;
+ }
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+ * fault we could otherwise deadlock.
+ */
+ goto bad_area_nosemaphore;
+ }
+
+ if (unlikely(error_code & (1 << 3)))
+ pgtable_bad(address, regs, error_code);
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (unlikely(in_atomic() || !mm))
+ goto bad_area_nosemaphore;
+
+ again:
+ /* When running in the kernel we expect faults to occur only to
+ * addresses in user space. All other faults represent errors in the
+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
+	 * erroneous fault occurring in a code path which already holds mmap_sem
+ * we will deadlock attempting to validate the fault against the
+ * address space. Luckily the kernel only validly references user
+ * space from well defined areas of code, which are listed in the
+ * exceptions table.
+ *
+ * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a deadlock.
+ * Attempt to lock the address space, if we cannot we then validate the
+ * source. If this is invalid we can skip the address space check,
+ * thus avoiding the deadlock.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((error_code & 4) == 0 &&
+ !search_exception_tables(regs->rip))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (likely(vma->vm_start <= address))
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (error_code & 4) {
+ // XXX: align red zone size with ABI
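+			/* The AMD64 ABI grants leaf functions a 128-byte
+			   red zone below %rsp, so accesses down to
+			   rsp - 128 may still be legitimate stack use. */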
+ if (address + 128 < regs->rsp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case 1:
+ tsk->min_flt++;
+ break;
+ case 2:
+ tsk->maj_flt++;
+ break;
+ case 0:
+ goto do_sigbus;
+ default:
+ goto out_of_memory;
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+#ifdef CONFIG_IA32_EMULATION
+ /* 32bit vsyscall. map on demand. */
+ if (test_thread_flag(TIF_IA32) &&
+ address >= VSYSCALL32_BASE && address < VSYSCALL32_END) {
+ if (map_syscall32(mm, address) < 0)
+ goto out_of_memory2;
+ return;
+ }
+#endif
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ /* Work around K8 erratum #100 K8 in compat mode
+ occasionally jumps to illegal addresses >4GB. We
+ catch this here in the page fault handler because
+ these addresses are not reachable. Just detect this
+ case and return. Any code segment in LDT is
+ compatibility mode. */
+ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
+ (address >> 32))
+ return;
+
+ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
+ printk(
+ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, tsk->pid, address, regs->rip,
+ regs->rsp, error_code);
+ }
+
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+
+ /* Are we prepared to handle this kernel fault? */
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return;
+ }
+
+ /*
+ * Hall of shame of CPU/BIOS bugs.
+ */
+
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ if (is_errata93(regs, address))
+ return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ oops_begin();
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at %016lx RIP: \n" KERN_ALERT,address);
+ printk_address(regs->rip);
+ printk("\n");
+ dump_pagetable(address);
+ __die("Oops", regs, error_code);
+ /* Executive summary in case the body of the oops scrolled away */
+ printk(KERN_EMERG "CR2: %016lx\n", address);
+ oops_end();
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (current->pid == 1) {
+ yield();
+ goto again;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (error_code & 4)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+ return;
+}
--- /dev/null
+/******************************************************************************
+ * mm/hypervisor.c
+ *
+ * Update page tables via the hypervisor.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+/*
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Added hypercalls for x86-64.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/multicall.h>
+#include <asm-xen/balloon.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <linux/percpu.h>
+#endif
+
+/*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
+/* Linux 2.6 isn't using the traditional batched interface. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define QUEUE_SIZE 2048
+#define pte_offset_kernel pte_offset
+#else
+#ifdef CONFIG_SMP
+#define QUEUE_SIZE 1
+#else
+/* #define QUEUE_SIZE 128 */
+#define QUEUE_SIZE 1
+#endif
+#endif
+
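+/* Per-CPU queue of pending mmu_update requests, with its current fill index. */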
+DEFINE_PER_CPU(mmu_update_t, update_queue[QUEUE_SIZE]);
+DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx);
+
+/*
+ * MULTICALL_flush_page_update_queue:
+ * This is a version of the flush which queues as part of a multicall.
+ */
+void MULTICALL_flush_page_update_queue(void)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ unsigned int _idx;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ if ( (_idx = idx) != 0 )
+ {
+ per_cpu(mmu_update_queue_idx, cpu) = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ queue_multicall3(__HYPERVISOR_mmu_update,
+ (unsigned long)&per_cpu(update_queue[0], cpu),
+ (unsigned long)_idx,
+ (unsigned long)NULL);
+ }
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void __flush_page_update_queue(void)
+{
+ int cpu = smp_processor_id();
+ unsigned int _idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(mmu_update_queue_idx, cpu) = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ if ( unlikely(HYPERVISOR_mmu_update(&per_cpu(update_queue[0], cpu), _idx, NULL) < 0) )
+ {
+ printk(KERN_ALERT "Failed to execute MMU updates (returning to %lx)\n",
+ (unsigned long)__builtin_return_address(0));
+ BUG();
+ }
+}
+
+void _flush_page_update_queue(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ if ( per_cpu(mmu_update_queue_idx, cpu) != 0 ) __flush_page_update_queue();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
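+/* Advance the queue index; flush to the hypervisor once the queue is full. */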
+static inline void increment_index(void)
+{
+ int cpu = smp_processor_id();
+ per_cpu(mmu_update_queue_idx, cpu)++;
+ if ( unlikely(per_cpu(mmu_update_queue_idx, cpu) == QUEUE_SIZE) ) __flush_page_update_queue();
+}
+
+static inline void increment_index_and_flush(void)
+{
+ int cpu = smp_processor_id();
+ per_cpu(mmu_update_queue_idx, cpu)++;
+ __flush_page_update_queue();
+}
+
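+/*
+ * The queue_* functions below append one request to the current CPU's
+ * update queue; pointers are translated to machine addresses, as the
+ * hypervisor interface expects. Requests are only flushed when the
+ * queue fills or an explicit flush is issued.
+ */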
+void queue_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val.pmd;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+
+void queue_pt_switch(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_tlb_flush(void)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_invlpg(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L4_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pud_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L3_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pud_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pmd_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pmd_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_set_ldt(unsigned long ptr, unsigned long len)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ per_cpu(update_queue[idx], cpu).val = pfn;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+/* Versions of the above that queue a single request and flush it immediately. */
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_l3_entry_update(pud_t *ptr, unsigned long val)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_l4_entry_update(pgd_t *ptr, unsigned long val)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pt_switch(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_new_user_pt(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_USER_BASEPTR;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_tlb_flush(void)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_load_gs(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_LOAD_GS;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_invlpg(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L4_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pud_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L3_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pud_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pmd_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pmd_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_pin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_unpin(unsigned long ptr)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_set_ldt(unsigned long ptr, unsigned long len)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
+ per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ per_cpu(update_queue[idx], cpu).val = pfn;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+
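+/*
+ * Allocate a physically contiguous low-memory region, unmap its pages,
+ * mark them invalid in the phys-to-machine map, and hand the underlying
+ * machine frames back to the balloon driver. The virtual region itself
+ * is returned empty, presumably for remapping I/O memory.
+ */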
+unsigned long allocate_empty_lowmem_region(unsigned long pages)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long *pfn_array;
+ unsigned long vstart;
+ unsigned long i;
+ unsigned int order = get_order(pages*PAGE_SIZE);
+
+ vstart = __get_free_pages(GFP_KERNEL, order);
+ if ( vstart == 0 )
+ return 0UL;
+
+ printk("vstart = %lx, order = %d\n", vstart, order);
+
+ scrub_pages(vstart, 1 << order);
+
+ pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+ if ( pfn_array == NULL )
+ BUG();
+
+ for ( i = 0; i < (1<<order); i++ )
+ {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn_array[i] = pte->pte >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+        phys_to_machine_mapping[__pa(vstart + (i*PAGE_SIZE))>>PAGE_SHIFT] = (u32)INVALID_P2M_ENTRY;
+ }
+
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ balloon_put_pages(pfn_array, 1 << order);
+
+ vfree(pfn_array);
+
+ return vstart;
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
--- /dev/null
+/*
+ * linux/arch/x86_64/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/proto.h>
+#include <asm/smp.h>
+
+#ifndef Dprintk
+#define Dprintk(x...)
+#endif
+
+#ifdef CONFIG_GART_IOMMU
+extern int swiotlb;
+#endif
+
+extern char _stext[];
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+extern unsigned long start_pfn;
+
+static int init_mapping_done;
+
+/*
+ * Use this until the direct mapping is established, i.e. before __va() is
+ * available in init_memory_mapping(). addr_to_page() masks off the entry's
+ * flag bits and converts the machine address of a page-table page into the
+ * virtual address it is mapped at via __START_KERNEL_map.
+ */
+
+#define addr_to_page(addr, page) \
+ (addr) &= PHYSICAL_PAGE_MASK; \
+ (page) = ((unsigned long *) ((unsigned long)(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map)))
+
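+/*
+ * Walk init_level4_pgt by hand, using addr_to_page() to follow the machine
+ * addresses stored in the page-table entries, and clear _PAGE_RW on the pte
+ * via a flushing hypercall. Used before the direct mapping (and hence the
+ * normal pgd/pud/pmd walkers) is available.
+ */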
+static void __make_page_readonly(unsigned long va)
+{
+ unsigned long addr;
+ pte_t *pte;
+ unsigned long *page = (unsigned long *) init_level4_pgt;
+
+ addr = (unsigned long) page[pgd_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pud_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pmd_index(va)];
+ addr_to_page(addr, page);
+
+ pte = (pte_t *) &page[pte_index(va)];
+ xen_l1_entry_update(pte, (*(unsigned long*)pte) & ~_PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+static void __make_page_writable(unsigned long va)
+{
+ unsigned long addr;
+ pte_t *pte;
+ unsigned long *page = (unsigned long *) init_level4_pgt;
+
+ addr = (unsigned long) page[pgd_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pud_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pmd_index(va)];
+ addr_to_page(addr, page);
+
+ pte = (pte_t *) &page[pte_index(va)];
+ xen_l1_entry_update(pte, (*(unsigned long*)pte)| _PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+
+/*
+ * Assume the translation is already established.
+ */
+void make_page_readonly(void *va)
+{
+ pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t *pte;
+ unsigned long addr = (unsigned long) va;
+
+ if (!init_mapping_done) {
+ __make_page_readonly(addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ pte = pte_offset_kernel(pmd, addr);
+ queue_l1_entry_update(pte, (*(unsigned long*)pte)&~_PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+void make_page_writable(void *va)
+{
+ pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t *pte;
+ unsigned long addr = (unsigned long) va;
+
+ if (!init_mapping_done) {
+ __make_page_writable(addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ pte = pte_offset_kernel(pmd, addr);
+ queue_l1_entry_update(pte, (*(unsigned long*)pte)|_PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+void make_pages_readonly(void* va, unsigned nr)
+{
+ while ( nr-- != 0 ) {
+ make_page_readonly(va);
+ va = (void*)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+void make_pages_writable(void* va, unsigned nr)
+{
+ while ( nr-- != 0 ) {
+ make_page_writable(va);
+ va = (void*)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+/*
+ * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
+ * in physical space so we can cache the location of the first one and move
+ * around without checking the pgd every time.
+ */
+
+void show_mem(void)
+{
+ int i, total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+ pg_data_t *pgdat;
+ struct page *page;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+
+ for_each_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
+ }
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+}
+
+/* References to section boundaries */
+
+extern char _text, _etext, _edata, __bss_start, _end[];
+extern char __init_begin, __init_end;
+
+int after_bootmem;
+
+static void *spp_getpage(void)
+{
+ void *ptr;
+ if (after_bootmem)
+ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+ else
+ ptr = alloc_bootmem_pages(PAGE_SIZE);
+ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
+ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
+
+ Dprintk("spp_getpage %p\n", ptr);
+ return ptr;
+}
+
+#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
+
+static inline pud_t *pud_offset_u(unsigned long address)
+{
+ pud_t *pud = level3_user_pgt;
+
+ return pud + pud_index(address);
+}
+
+static void set_pte_phys(unsigned long vaddr,
+ unsigned long phys, pgprot_t prot, int user_mode)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte, new_pte;
+
+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+
+ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
+
+ if (pgd_none(*pgd)) {
+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+ return;
+ }
+
+ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
+
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+ if (pmd != pmd_offset(pud, 0)) {
+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
+ return;
+ }
+ }
+
+ pmd = pmd_offset(pud, vaddr);
+
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) spp_getpage();
+ make_page_readonly(pte);
+
+ xen_pte_pin(__pa(pte));
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+ if (pte != pte_offset_kernel(pmd, 0)) {
+ printk("PAGETABLE BUG #02!\n");
+ return;
+ }
+ }
+ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
+
+ pte = pte_offset_kernel(pmd, vaddr);
+
+ if (!pte_none(*pte) &&
+ pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
+ pte_ERROR(*pte);
+ xen_l1_entry_update(pte, new_pte.pte);
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+static void set_pte_phys_ma(unsigned long vaddr,
+ unsigned long phys, pgprot_t prot)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte, new_pte;
+
+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+
+ pgd = pgd_offset_k(vaddr);
+ if (pgd_none(*pgd)) {
+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+ return;
+ }
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
+
+ pmd = (pmd_t *) spp_getpage();
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+
+ if (pmd != pmd_offset(pud, 0)) {
+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
+ return;
+ }
+ }
+ pmd = pmd_offset(pud, vaddr);
+
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) spp_getpage();
+ make_page_readonly(pte);
+ xen_pte_pin(__pa(pte));
+
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+ if (pte != pte_offset_kernel(pmd, 0)) {
+ printk("PAGETABLE BUG #02!\n");
+ return;
+ }
+ }
+
+ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
+ pte = pte_offset_kernel(pmd, vaddr);
+
+ if (!pte_none(*pte) &&
+ pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
+ pte_ERROR(*pte);
+
+ /*
+ * Note that the pte page is already RO, thus we want to use
+ * xen_l1_entry_update(), not set_pte().
+ */
+ xen_l1_entry_update(pte,
+ (pfn_pte_ma(phys >> PAGE_SHIFT, prot).pte));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+#define SET_FIXMAP_KERNEL 0
+#define SET_FIXMAP_USER 1
+
+/* NOTE: this is meant to be run only at boot */
+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+ set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
+}
+
+
+/*
+ * At this point it only supports vsyscall area.
+ */
+void __set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+
+ set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
+
+#if 0
+ page = (unsigned long *) user_level3_pgt;
+ pud = page[pud_index(address)];
+
+ printk("pud = %p\n", pud);
+
+ pmd = (pmd_t *) spp_getpage();
+ printk("alloc pmd = %p\n", pmd);
+
+ make_page_readonly((unsigned long)pmd);
+
+ xen_pmd_pin(__pa(pmd));
+
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+
+ printk("after set_pud\n");
+
+ pte = (pte_t *) spp_getpage();
+ printk("pte = %p\n");
+
+ make_page_readonly((unsigned long)pte);
+
+ xen_pte_pin(__pa(pte));
+
+ page = (unsigned long *) pud;
+ pud = page[pud_index(address)];
+
+
+ pmd = pmd_offset(pud, vaddr);
+
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+#endif
+
+}
+
+
+void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+ set_pte_phys_ma(address, phys, prot);
+}
+
+unsigned long __initdata table_start, table_end, tables_reserved;
+
+#if 0
+/*
+ * Get the machine PFN given va
+ */
+static unsigned long get_machine_pfn(unsigned long va)
+{
+ unsigned long addr;
+ pte_t *pte;
+
+ unsigned long *page = (unsigned long *) init_level4_pgt;
+
+ addr = (unsigned long) page[pgd_index(va)];
+ addr &= PHYSICAL_PAGE_MASK;
+ page = (unsigned long *) ((unsigned long)(((mfn_to_pfn(addr >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map));
+
+ addr = page[pud_index(va)];
+ addr &= PHYSICAL_PAGE_MASK;
+ page = (unsigned long *) ((unsigned long)(((mfn_to_pfn(addr >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map));
+
+ addr = page[pmd_index(va)];
+ addr &= PHYSICAL_PAGE_MASK;
+ page = (unsigned long *) ((unsigned long)(((mfn_to_pfn(addr >> PAGE_SHIFT)) << PAGE_SHIFT)+ __START_KERNEL_map));
+
+ pte = (pte_t *) &page[pte_index(va)];
+
+ return (unsigned long) (pte->pte >> PAGE_SHIFT);
+}
+#endif
+
+unsigned long get_machine_pfn(unsigned long addr)
+{
+ pud_t* pud = pud_offset_k(addr);
+ pmd_t* pmd = pmd_offset(pud, addr);
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+
+ return (pte->pte >> PAGE_SHIFT);
+}
+
+
+/*
+ * We start using from start_pfn
+ */
+static __init void *alloc_low_page(unsigned long *phys)
+{
+ unsigned long pfn = table_end++;
+
+ *phys = (pfn << PAGE_SHIFT);
+ memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);
+
+ return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
+}
+
+#define PTE_SIZE PAGE_SIZE
+
+static inline void __set_pte(pte_t *dst, pte_t val)
+{
+ *dst = val;
+}
+
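+/*
+ * Populate one pud's worth of the direct mapping. Each new pmd/pte page
+ * is made read-only and pinned before it is hooked into the hierarchy,
+ * since Xen requires page-table pages to be mapped read-only. Pages that
+ * hold the early page tables themselves are mapped without _PAGE_RW too.
+ */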
+void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+{
+ long i, j, k;
+ unsigned long paddr;
+
+ i = pud_index(address);
+ pud = pud + i;
+
+ for (; i < PTRS_PER_PUD; pud++, i++) {
+ unsigned long pmd_phys;
+ pmd_t *pmd;
+
+ paddr = address + i*PUD_SIZE;
+ if (paddr >= end) {
+ for (; i < PTRS_PER_PUD; i++, pud++)
+ set_pud(pud, __pud(0));
+ break;
+ }
+
+ pmd = alloc_low_page(&pmd_phys);
+ make_page_readonly(pmd);
+ xen_pmd_pin(pmd_phys);
+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+
+ for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
+ unsigned long pte_phys;
+ pte_t *pte, *pte_save;
+
+ if (paddr >= end) {
+ for (; j < PTRS_PER_PMD; j++, pmd++)
+ set_pmd(pmd, __pmd(0));
+ break;
+ }
+ pte = alloc_low_page(&pte_phys);
+ pte_save = pte;
+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
+ if (paddr < (table_start << PAGE_SHIFT)
+ + tables_reserved)
+ {
+ __set_pte(pte,
+ __pte(paddr | (_KERNPG_TABLE & ~_PAGE_RW)));
+ continue;
+ }
+ if (paddr >= end) {
+ for (; k < PTRS_PER_PTE; k++, pte++)
+ __set_pte(pte, __pte(0));
+ break;
+ }
+ __set_pte(pte, __pte(paddr | _KERNPG_TABLE));
+ }
+ pte = pte_save;
+ make_page_readonly(pte);
+ xen_pte_pin(pte_phys);
+ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
+ }
+ }
+ __flush_tlb();
+}
+
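+/*
+ * Estimate how much space the direct-mapping page tables will need: one
+ * 8-byte entry per pud/pmd/pte covering [0, end), rounded up to whole
+ * pages and reserved starting at start_pfn.
+ */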
+static void __init find_early_table_space(unsigned long end)
+{
+ unsigned long puds, pmds, ptes;
+
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
+
+ tables_reserved = round_up(puds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE)
+ + round_up(ptes * 8, PAGE_SIZE);
+ table_start = start_pfn;
+ table_end = table_start;
+}
+
+/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ This runs before bootmem is initialized and gets pages directly from the
+ physical memory. To access them they are temporarily mapped. */
+void __init init_memory_mapping(unsigned long start, unsigned long end)
+{
+ unsigned long next;
+
+
+ Dprintk("init_memory_mapping\n");
+
+ find_early_table_space(end);
+
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);
+
+ for (; start < end; start = next) {
+ unsigned long pud_phys;
+ pud_t *pud = alloc_low_page(&pud_phys);
+ make_page_readonly(pud);
+ xen_pud_pin(pud_phys);
+ next = start + PGDIR_SIZE;
+ if (next > end)
+ next = end;
+ phys_pud_init(pud, __pa(start), __pa(next));
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+ }
+
+ early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
+ table_start<<PAGE_SHIFT,
+ table_end<<PAGE_SHIFT);
+
+// start_pfn = table_end;
+
+ /*
+ * TBD: Need to calculate at runtime
+ */
+ start_pfn = (8*0x100000) >> PAGE_SHIFT;
+
+ __flush_tlb_all();
+ init_mapping_done = 1;
+}
+
+extern struct x8664_pda cpu_pda[NR_CPUS];
+
+void zap_low_mappings(void)
+{
+ /* this is not required for Xen */
+#if 0
+ swap_low_mappings();
+#endif
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init paging_init(void)
+{
+ int i;
+
+ {
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ /* unsigned int max_dma; */
+ /* max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; */
+ /* if (end_pfn < max_dma) */
+ zones_size[ZONE_DMA] = end_pfn;
+#if 0
+ else {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = end_pfn - max_dma;
+ }
+#endif
+ free_area_init(zones_size);
+ }
+
+ __set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info,
+ __pgprot(_KERNPG_TABLE));
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* Setup mapping of lower 1st MB */
+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
+ if (xen_start_info.flags & SIF_PRIVILEGED)
+ __set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE,
+ __pgprot(_KERNPG_TABLE));
+ else
+ set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+ virt_to_machine(empty_zero_page));
+#endif
+
+}
+#endif
+
+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
+ from the CPU leading to inconsistent cache lines. address and size
+ must be aligned to 2MB boundaries.
+ Does nothing when the mapping doesn't exist. */
+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
+{
+ unsigned long end = address + size;
+
+ BUG_ON(address & ~LARGE_PAGE_MASK);
+ BUG_ON(size & ~LARGE_PAGE_MASK);
+
+ for (; address < end; address += LARGE_PAGE_SIZE) {
+ pgd_t *pgd = pgd_offset_k(address);
+ pud_t *pud;
+ pmd_t *pmd;
+ if (pgd_none(*pgd))
+ continue;
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud))
+ continue;
+ pmd = pmd_offset(pud, address);
+ if (!pmd || pmd_none(*pmd))
+ continue;
+ if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
+ /* Could handle this, but it should not happen currently. */
+ printk(KERN_ERR
+ "clear_kernel_mapping: mapping has been split. will leak memory\n");
+ pmd_ERROR(*pmd);
+ }
+ set_pmd(pmd, __pmd(0));
+ }
+ __flush_tlb_all();
+}
+
+static inline int page_is_ram (unsigned long pagenr)
+{
+ if (pagenr < start_pfn || pagenr >= end_pfn)
+ return 0;
+
+ return 1;
+}
+
+extern int swiotlb_force;
+
+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
+ kcore_vsyscall;
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_force)
+ swiotlb = 1;
+ if (!iommu_aperture &&
+ (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
+ swiotlb = 1;
+ if (swiotlb)
+ swiotlb_init();
+#endif
+
+ /* How many end-of-memory variables you have, grandma! */
+ max_low_pfn = end_pfn;
+ max_pfn = end_pfn;
+ num_physpages = end_pfn;
+ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ reservedpages = 0;
+
+ /* this will put all low memory onto the freelists */
+#ifdef CONFIG_DISCONTIGMEM
+ totalram_pages += numa_free_all_bootmem();
+ tmp = 0;
+ /* should count reserved pages here for all nodes */
+#else
+ max_mapnr = end_pfn;
+ if (!mem_map) BUG();
+
+ totalram_pages += free_all_bootmem();
+
+ for (tmp = 0; tmp < end_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+ reservedpages++;
+#endif
+
+ after_bootmem = 1;
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ /* Register memory areas for /proc/kcore */
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+ kclist_add(&kcore_kernel, &_stext, _end - _stext);
+ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
+ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
+ VSYSCALL_END - VSYSCALL_START);
+
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ end_pfn << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10);
+
+ /*
+ * Subtle. SMP is doing its boot stuff late (because it has to
+ * fork idle threads) - but it also needs low mappings for the
+ * protected-mode entry to work. We zap these entries only after
+ * the WP-bit has been tested.
+ */
+#ifndef CONFIG_SMP
+ zap_low_mappings();
+#endif
+}
+
+extern char __initdata_begin[], __initdata_end[];
+
+void free_initmem(void)
+{
+#ifdef __DO_LATER__
+ /*
+ * Some pages can be pinned, but some are not. Unpinning such pages
+ * triggers BUG().
+ */
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+ xen_pte_unpin(__pa(addr));
+ make_page_writable(__va(__pa(addr)));
+ /*
+ * Make pages from __PAGE_OFFSET address as well
+ */
+ make_page_writable((void *)addr);
+ free_page(addr);
+ totalram_pages++;
+ }
+ memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
+ printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
+#endif
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < (unsigned long)&_end)
+ return;
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+{
+ /* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_DISCONTIGMEM
+ int nid = phys_to_nid(phys);
+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
+#else
+ reserve_bootmem(phys, len);
+#endif
+}
+
+int kern_addr_valid(unsigned long addr)
+{
+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (above != 0 && above != -1UL)
+ return 0;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ return 0;
+
+ pud = pud_offset_k(addr);
+ if (pud_none(*pud))
+ return 0;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+ if (pmd_large(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte))
+ return 0;
+ return pfn_valid(pte_pfn(*pte));
+}
+
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+extern int exception_trace, page_fault_trace;
+
+static ctl_table debug_table2[] = {
+ { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
+ proc_dointvec },
+#ifdef CONFIG_CHECKING
+ { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
+ proc_dointvec },
+#endif
+ { 0, }
+};
+
+static ctl_table debug_root_table2[] = {
+ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
+ .child = debug_table2 },
+ { 0 },
+};
+
+static __init int x8664_sysctl_init(void)
+{
+ register_sysctl_table(debug_root_table2, 1);
+ return 0;
+}
+__initcall(x8664_sysctl_init);
+#endif
+
+/* Pseudo VMAs to allow ptrace access for the vsyscall pages. x86-64 has two
+   different ones: one for 32bit and one for 64bit. Use the appropriate
+   one for the target task. */
+
+static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_END,
+ .vm_page_prot = PAGE_READONLY
+};
+
+static struct vm_area_struct gate32_vma = {
+ .vm_start = VSYSCALL32_BASE,
+ .vm_end = VSYSCALL32_END,
+ .vm_page_prot = PAGE_READONLY
+};
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+#ifdef CONFIG_IA32_EMULATION
+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+ /* lookup code assumes the pages are present. set them up
+ now */
+ if (__map_syscall32(tsk->mm, VSYSCALL32_BASE) < 0)
+ return NULL;
+ return &gate32_vma;
+ }
+#endif
+ return &gate_vma;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+ struct vm_area_struct *vma = get_gate_vma(task);
+ return (addr >= vma->vm_start) && (addr < vma->vm_end);
+}
+
+/* Use this when you have no reliable task/vma, typically from interrupt
+ * context. It is less reliable than using the task's vma and may give
+ * false positives.
+ */
+int in_gate_area_no_task(unsigned long addr)
+{
+ return (((addr >= VSYSCALL_START) && (addr < VSYSCALL_END)) ||
+ ((addr >= VSYSCALL32_BASE) && (addr < VSYSCALL32_END)));
+}
--- /dev/null
+/*
+ * arch/x86_64/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Reuses arch/xen/i386/mm/ioremap.c; needs to be merged later.
+ */
+#ifndef CONFIG_XEN_PHYSDEV_ACCESS
+
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
+{
+ return NULL;
+}
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+}
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+}
+
+#else
+
+#if defined(__i386__)
+/*
+ * Does @address reside within a non-highmem page that is local to this virtual
+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
+ * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
+ * why this works.
+ */
+static inline int is_local_lowmem(unsigned long address)
+{
+ extern unsigned long max_low_pfn;
+ unsigned long mfn = address >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+}
+#elif defined(__x86_64__)
+/*
+ * On x86-64 the lowmem optimization is not implemented yet, so no address
+ * is treated as local lowmem and all remaps go through DOMID_IO.
+ */
+static inline int is_local_lowmem(unsigned long address)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void __iomem * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
+ domid_t domid = DOMID_IO;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (is_local_lowmem(phys_addr)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = bus_to_virt(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ if(!PageReserved(page))
+ return NULL;
+
+ domid = DOMID_LOCAL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
+ if (!area)
+ return NULL;
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
+ if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
+ size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
+ _PAGE_DIRTY | _PAGE_ACCESSED
+#if defined(__x86_64__)
+ | _PAGE_USER
+#endif
+ | flags), domid)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char __iomem *)addr);
+}
+
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ *
+ * Must be freed with iounmap.
+ */
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ unsigned long last_addr;
+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+ if (!p)
+ return p;
+
+ /* Guaranteed to be > phys_addr, as per __ioremap() */
+ last_addr = phys_addr + size - 1;
+
+ if (is_local_lowmem(last_addr)) {
+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
+ unsigned long npages;
+
+ phys_addr &= PAGE_MASK;
+
+ /* This might overflow and become zero.. */
+ last_addr = PAGE_ALIGN(last_addr);
+
+ /* .. but that's ok, because modulo-2**n arithmetic will make
+ * the page-aligned "last - first" come out right.
+ */
+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+
+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
+ iounmap(p);
+ p = NULL;
+ }
+ global_flush_tlb();
+ }
+
+ return p;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ struct vm_struct *p;
+ if ((void __force *) addr <= high_memory)
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p) {
+ printk("__iounmap: bad address %p\n", addr);
+ return;
+ }
+
+ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
+ /* p->size includes the guard page, but cpa doesn't like that */
+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
+ PAGE_KERNEL);
+ global_flush_tlb();
+ }
+ kfree(p);
+}
+
+#if defined(__i386__)
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long offset, last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr) - phys_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (nrpages > NR_FIX_BTMAPS)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ set_fixmap_ma(idx, phys_addr);
+ phys_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+ unsigned long virt_addr;
+ unsigned long offset;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ virt_addr = (unsigned long)addr;
+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ offset = virt_addr & ~PAGE_MASK;
+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ clear_fixmap(idx);
+ --idx;
+ --nrpages;
+ }
+}
+#endif /* defined(__i386__) */
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+ __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+ __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
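+/*
+ * Fill in only the machine pointer (the pte slot to update) of each
+ * queued mmu_update request; the new value has already been set by
+ * direct_remap_area_pages().
+ */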
+static inline void direct_remap_area_pte(pte_t *pte,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+
+ do {
+ (*v)->ptr = virt_to_machine(pte);
+ (*v)++;
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+ pmd_t *pmd,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t *pte = (mm == &init_mm) ?
+ pte_alloc_kernel(mm, pmd, address) :
+ pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ direct_remap_area_pte(pte, address, end - address, v);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+ int error;
+
+#if defined(__i386__)
+ dir = pgd_offset(mm, address);
+#elif defined(__x86_64__)
+ dir = (mm == &init_mm) ?
+ pgd_offset_k(address):
+ pgd_offset(mm, address);
+#endif
+ if (address >= end)
+ BUG();
+ spin_lock(&mm->page_table_lock);
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ error = -ENOMEM;
+ pud = pud_alloc(mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(mm, pud, address);
+ if (!pmd)
+ break;
+ error = 0;
+ direct_remap_area_pmd(mm, pmd, address, end - address, &v);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+
+ } while (address && (address < end));
+ spin_unlock(&mm->page_table_lock);
+ return error;
+}
+
+
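+/*
+ * Install mappings for a machine-address range by batching mmu_update
+ * requests. For a foreign domain the batch is prefixed with an
+ * MMUEXT_SET_FOREIGNDOM command so the hypervisor validates the frames
+ * against that domain.
+ */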
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid)
+{
+ int i;
+ unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+
+ v = w = &u[0];
+ if (domid != DOMID_LOCAL) {
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)domid << 16;
+ v = w = &u[1];
+ }
+
+ start_address = address;
+
+ flush_cache_all();
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
+ /* Fill in the PTE pointers. */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ w);
+
+ if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
+ return -EFAULT;
+ v = w;
+ start_address = address;
+ }
+
+ /*
+ * Fill in the machine address: PTE ptr is done later by
+ * __direct_remap_area_pages().
+ */
+ v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+ machine_addr += PAGE_SIZE;
+ address += PAGE_SIZE;
+ v++;
+ }
+
+ if (v != w) {
+ /* get the ptep's filled in */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ w);
+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
+ return -EFAULT;
+ }
+
+ flush_tlb_all();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(direct_remap_area_pages);
--- /dev/null
+/*
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ * Thanks to Ben LaHaise for precious feedback.
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+
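+/*
+ * XEN: a page used as a page table is pinned read-only by the hypervisor,
+ * so before it may return to the free pool it must be unpinned and made
+ * writable again -- hence this Xen-specific pte_free().
+ */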
+void pte_free(struct page *pte)
+{
+ pte_t *ptep;
+
+ ptep = pfn_to_kaddr(page_to_pfn(pte));
+
+ xen_pte_unpin(__pa(ptep));
+ make_page_writable(ptep);
+ flush_page_update_queue();
+ __free_page(pte);
+}
+
+static inline pte_t *lookup_address(unsigned long address)
+{
+ pgd_t *pgd = pgd_offset_k(address);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ if (pgd_none(*pgd))
+ return NULL;
+ pud = pud_offset(pgd, address);
+ if (!pud_present(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(*pmd))
+ return NULL;
+ if (pmd_large(*pmd))
+ return (pte_t *)pmd;
+ pte = pte_offset_kernel(pmd, address);
+ if (pte && !pte_present(*pte))
+ pte = NULL;
+ return pte;
+}
+
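+/*
+ * Replace one large mapping by a freshly allocated page of 512 4k PTEs:
+ * the entry covering 'address' receives 'prot', all other entries keep
+ * 'ref_prot'.
+ */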
+static struct page *split_large_page(unsigned long address, pgprot_t prot,
+ pgprot_t ref_prot)
+{
+ int i;
+ unsigned long addr;
+ struct page *base = alloc_pages(GFP_KERNEL, 0);
+ pte_t *pbase;
+ if (!base)
+ return NULL;
+ address = __pa(address);
+ addr = address & LARGE_PAGE_MASK;
+ pbase = (pte_t *)page_address(base);
+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
+ addr == address ? prot : ref_prot);
+ }
+ return base;
+}
+
+
+static void flush_kernel_map(void *address)
+{
+ if (0 && address && cpu_has_clflush) {
+ /* is this worth it? */
+ int i;
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ asm volatile("clflush (%0)" :: "r" (address + i));
+ } else
+ asm volatile("wbinvd":::"memory");
+ if (address)
+ __flush_tlb_one((unsigned long) address);
+ else
+ __flush_tlb_all();
+}
+
+
+static inline void flush_map(unsigned long address)
+{
+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+}
+
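+/*
+ * Pages freed when a large page is regenerated may still be reachable
+ * through stale TLB entries, so they are parked on df_list and released
+ * only after global_flush_tlb() has flushed every CPU.
+ */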
+struct deferred_page {
+ struct deferred_page *next;
+ struct page *fpage;
+ unsigned long address;
+};
+static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
+
+static inline void save_page(unsigned long address, struct page *fpage)
+{
+ struct deferred_page *df;
+ df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
+ if (!df) {
+ flush_map(address);
+ __free_page(fpage);
+ } else {
+ df->next = df_list;
+ df->fpage = fpage;
+ df->address = address;
+ df_list = df;
+ }
+}
+
+/*
+ * No more special protections in this 2/4MB area - revert to a
+ * large page again.
+ */
+static void revert_page(unsigned long address, pgprot_t ref_prot)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t large_pte;
+
+ pgd = pgd_offset_k(address);
+ BUG_ON(pgd_none(*pgd));
+ pud = pud_offset(pgd,address);
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, address);
+ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
+ pgprot_val(ref_prot) |= _PAGE_PSE;
+ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
+ set_pte((pte_t *)pmd, large_pte);
+}
+
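+/*
+ * The page holding the 4k PTEs doubles as a reference counter: every
+ * change away from ref_prot takes a reference, every change back drops
+ * one, and once only the original split_large_page() reference remains
+ * the large mapping is restored and the PTE page queued for freeing.
+ */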
+static int
+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
+ pgprot_t ref_prot)
+{
+ pte_t *kpte;
+ struct page *kpte_page;
+ unsigned kpte_flags;
+ kpte = lookup_address(address);
+ if (!kpte) return 0;
+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+ kpte_flags = pte_val(*kpte);
+ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
+ if ((kpte_flags & _PAGE_PSE) == 0) {
+ set_pte(kpte, pfn_pte(pfn, prot));
+ } else {
+ /*
+ * split_large_page will take the reference for this change_page_attr
+ * on the split page.
+ */
+ struct page *split = split_large_page(address, prot, ref_prot);
+ if (!split)
+ return -ENOMEM;
+ set_pte(kpte,mk_pte(split, ref_prot));
+ kpte_page = split;
+ }
+ get_page(kpte_page);
+ } else if ((kpte_flags & _PAGE_PSE) == 0) {
+ set_pte(kpte, pfn_pte(pfn, ref_prot));
+ __put_page(kpte_page);
+ } else
+ BUG();
+
+ /* on x86-64 the direct mapping set up at boot does not use 4k pages */
+ BUG_ON(PageReserved(kpte_page));
+
+ switch (page_count(kpte_page)) {
+ case 1:
+ save_page(address, kpte_page);
+ revert_page(address, ref_prot);
+ break;
+ case 0:
+ BUG(); /* memleak and failed 2M page regeneration */
+ }
+ return 0;
+}
+
+/*
+ * Change the page attributes of a page in the linear mapping.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * kernel linear mapping too.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
+ * This function only deals with the kernel linear map.
+ *
+ * Caller must call global_flush_tlb() after this.
+ */
+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
+{
+ int err = 0;
+ int i;
+
+ down_write(&init_mm.mmap_sem);
+ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
+ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+
+ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+ if (err)
+ break;
+ /* Handle kernel mapping too which aliases part of the
+ * lowmem */
+ if (__pa(address) < KERNEL_TEXT_SIZE) {
+ unsigned long addr2;
+ pgprot_t prot2 = prot;
+ addr2 = __START_KERNEL_map + __pa(address);
+ pgprot_val(prot2) &= ~_PAGE_NX;
+ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
+ }
+ }
+ up_write(&init_mm.mmap_sem);
+ return err;
+}
+
+/* Don't call this for MMIO areas that may not have a mem_map entry */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ return change_page_attr_addr(addr, numpages, prot);
+}
+
+void global_flush_tlb(void)
+{
+ struct deferred_page *df, *next_df;
+
+ down_read(&init_mm.mmap_sem);
+ df = xchg(&df_list, NULL);
+ up_read(&init_mm.mmap_sem);
+ if (!df)
+ return;
+ flush_map((df && !df->next) ? df->address : 0);
+ for (; df; df = next_df) {
+ next_df = df->next;
+ if (df->fpage)
+ __free_page(df->fpage);
+ kfree(df);
+ }
+}
+
+EXPORT_SYMBOL(change_page_attr);
+EXPORT_SYMBOL(global_flush_tlb);
--- /dev/null
+#
+# Makefile for X86_64 specific PCI routines
+#
+# Reuse the i386 PCI subsystem
+#
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+CFLAGS += -Iarch/$(XENARCH)/pci
+
+CFLAGS += -Iarch/i386/pci
+
+c-obj-y := i386.o
+c-obj-y += fixup.o
+c-obj-$(CONFIG_ACPI_PCI) += acpi.o
+c-obj-y += legacy.o common.o
+c-xen-obj-$(CONFIG_PCI_DIRECT) += direct.o
+c-xen-obj-y += irq.o
+# mmconfig has a 64-bit-specific version
+c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+
+c-obj-$(CONFIG_NUMA) += k8-bus.o
+
+c-direct-y += ../../i386/pci/direct.o
+c-acpi-y += ../../i386/pci/acpi.o
+c-legacy-y += ../../i386/pci/legacy.o
+c-irq-y += ../../i386/pci/irq.o
+c-common-y += ../../i386/pci/common.o
+c-fixup-y += ../../i386/pci/fixup.o
+c-i386-y += ../../i386/pci/i386.o
+
+c-link :=
+
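+# The rules below populate the object directory with symlinks so that the
+# shared sources build in the x86_64 tree: c-xen-obj-y entries come from
+# the Xen-modified arch/xen/i386/pci, plain c-obj-y entries from the
+# vanilla arch/i386/pci.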
+$(patsubst %.o,$(obj)/%.c,$(c-xen-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/xen/i386/pci/$(notdir $@) $@
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+obj-y += $(c-xen-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
--- /dev/null
+#
+# Makefile for X86_64 specific PCI routines
+#
+# Reuse the i386 PCI subsystem
+#
+CFLAGS += -I arch/i386/pci
+
+obj-y := i386.o
+obj-$(CONFIG_PCI_DIRECT) += direct.o
+obj-y += fixup.o
+obj-$(CONFIG_ACPI_PCI) += acpi.o
+obj-y += legacy.o irq.o common.o
+# mmconfig has a 64-bit-specific version
+obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+
+direct-y += ../../i386/pci/direct.o
+acpi-y += ../../i386/pci/acpi.o
+legacy-y += ../../i386/pci/legacy.o
+irq-y += ../../i386/pci/irq.o
+common-y += ../../i386/pci/common.o
+fixup-y += ../../i386/pci/fixup.o
+i386-y += ../../i386/pci/i386.o
--- /dev/null
+#ifndef _ASM_ARCH_HOOKS_H
+#define _ASM_ARCH_HOOKS_H
+
+#include <linux/interrupt.h>
+
+/*
+ * linux/include/asm/arch_hooks.h
+ *
+ * define the architecture specific hooks
+ */
+
+/* these aren't arch hooks, they are generic routines
+ * that can be used by the hooks */
+extern void init_ISA_irqs(void);
+extern void apic_intr_init(void);
+extern void smp_intr_init(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/* these are the defined hooks */
+extern void intr_init_hook(void);
+extern void pre_intr_init_hook(void);
+extern void pre_setup_arch_hook(void);
+extern void trap_init_hook(void);
+extern void time_init_hook(void);
+extern void mca_nmi_hook(void);
+
+#endif
--- /dev/null
+
+#ifndef _X86_64_BOOTSETUP_H
+#define _X86_64_BOOTSETUP_H 1
+
+extern char x86_boot_params[2048];
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+#define PARAM ((unsigned char *)x86_boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
+
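+/* XEN: the initial ramdisk is passed in by the domain builder through the
+ * start-of-day info page, not by a boot loader, so it comes from
+ * xen_start_info rather than from the PARAM area. */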
+#define INITRD_START (__pa(xen_start_info.mod_start))
+#define INITRD_SIZE (xen_start_info.mod_len)
+#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
+
+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
+#define COMMAND_LINE saved_command_line
+
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+#endif
--- /dev/null
+/* Written 2000 by Andi Kleen */
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <linux/threads.h>
+#include <asm/ldt.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/string.h>
+#include <asm/segment.h>
+#include <asm/mmu.h>
+
+// 8 byte segment descriptor
+struct desc_struct {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+} __attribute__((packed));
+
+struct n_desc_struct {
+ unsigned int a,b;
+};
+
+enum {
+ GATE_INTERRUPT = 0xE,
+ GATE_TRAP = 0xF,
+ GATE_CALL = 0xC,
+};
+
+// 16byte gate
+struct gate_struct {
+ u16 offset_low;
+ u16 segment;
+ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+ u16 offset_middle;
+ u32 offset_high;
+ u32 zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
+
+enum {
+ DESC_TSS = 0x9,
+ DESC_LDT = 0x2,
+};
+
+// LDT or TSS descriptor in the GDT. 16 bytes.
+struct ldttss_desc {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ u32 base3;
+ u32 zero1;
+} __attribute__((packed));
+
+struct desc_ptr {
+ unsigned short size;
+ unsigned long address;
+} __attribute__((packed)) ;
+
+extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
+
+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
+/* GDT entries are 8 bytes, so the table is indexed as desc_structs. */
+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)(cpu_gdt_descr[(_cpu)].address))
+
+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
+#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct gate_struct idt_table[];
+
+static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
+{
+ struct gate_struct s;
+ s.offset_low = PTR_LOW(func);
+ s.segment = __KERNEL_CS;
+ s.ist = ist;
+ s.p = 1;
+ s.dpl = dpl;
+ s.zero0 = 0;
+ s.zero1 = 0;
+ s.type = type;
+ s.offset_middle = PTR_MIDDLE(func);
+ s.offset_high = PTR_HIGH(func);
+ /* does not need to be atomic because it is only done once at setup time */
+ memcpy(adr, &s, 16);
+}
+
+static inline void set_intr_gate(int nr, void *func)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
+}
+
+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
+}
+
+static inline void set_system_gate(int nr, void *func)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
+}
+
+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
+ unsigned size)
+{
+ struct ldttss_desc d;
+ memset(&d,0,sizeof(d));
+ d.limit0 = size & 0xFFFF;
+ d.base0 = PTR_LOW(tss);
+ d.base1 = PTR_MIDDLE(tss) & 0xFF;
+ d.type = type;
+ d.p = 1;
+ d.limit1 = (size >> 16) & 0xF;
+ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
+ d.base3 = PTR_HIGH(tss);
+ memcpy(ptr, &d, 16);
+}
+
+static inline void set_tss_desc(unsigned cpu, void *addr)
+{
+ set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS],
+ (unsigned long)addr,
+ DESC_TSS,
+ sizeof(struct tss_struct) - 1);
+}
+
+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
+{
+ set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
+ (unsigned long)addr,
+ DESC_LDT, size * 8 - 1);
+}
+
+static inline void set_seg_base(unsigned cpu, int entry, void *base)
+{
+ struct desc_struct *d = (struct desc_struct *)&get_cpu_gdt_table(cpu)[entry];
+ u32 addr = (u32)(u64)base;
+ BUG_ON((u64)base >> 32);
+ d->base0 = addr & 0xffff;
+ d->base1 = (addr >> 16) & 0xff;
+ d->base2 = (addr >> 24) & 0xff;
+}
+
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+/* Don't allow setting of the lm bit. It is useless anyways because
+ 64bit system calls require __USER_CS. */
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ /* ((info)->lm << 21) | */ \
+ 0x7000)
+
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 && \
+ (info)->lm == 0)
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#if 0
+ u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
+ gdt[0] = t->tls_array[0];
+ gdt[1] = t->tls_array[1];
+ gdt[2] = t->tls_array[2];
+#endif
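+ /*
+  * XEN: the GDT frames are pinned read-only by the hypervisor, so the
+  * direct stores above (#if 0) would fault; each TLS descriptor is
+  * instead rewritten via a hypercall.
+  */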
+#define C(i) \
+ HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
+
+ C(0); C(1); C(2);
+#undef C
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+{
+ void *segments = pc->ldt;
+ int count = pc->size;
+
+ if (likely(!count))
+ segments = NULL;
+
+ queue_set_ldt((unsigned long)segments, count);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+ int cpu = get_cpu();
+ load_LDT_nolock(pc, cpu);
+ put_cpu();
+}
+
+extern struct desc_ptr idt_descr;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
--- /dev/null
+#ifndef _X8664_DMA_MAPPING_H
+#define _X8664_DMA_MAPPING_H 1
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/config.h>
+
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+extern dma_addr_t bad_dma_address;
+#define dma_mapping_error(x) \
+ (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
+
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned gfp);
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+#ifdef CONFIG_GART_IOMMU
+
+extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+ int direction);
+
+#else
+
+/* No IOMMU */
+
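+/*
+ * XEN: devices address memory by machine address, so instead of the
+ * identity virt->phys bus mapping of the native no-IOMMU case the bus
+ * address here is virt -> pseudo-phys -> machine.
+ */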
+static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
+ size_t size, int direction)
+{
+ dma_addr_t addr;
+
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+ addr = virt_to_phys(ptr);
+
+ if ((addr+size) & ~*hwdev->dma_mask)
+ out_of_line_bug();
+ return phys_to_machine(addr);
+}
+
+static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+ /* Nothing to do */
+}
+#endif
+
+#define dma_map_page(dev,page,offset,size,dir) \
+ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
+
+static inline void dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *hwdev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_sg_for_device(struct device *hwdev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
+
+ flush_write_buffers();
+}
+
+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+
+#define dma_unmap_page dma_unmap_single
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_get_cache_alignment(void);
+#define dma_is_consistent(h) 1
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+
+static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+{
+ flush_write_buffers();
+}
+#endif
--- /dev/null
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#include <asm/vsyscall.h>
+#include <asm/vsyscall32.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+enum fixed_addresses {
+ VSYSCALL_LAST_PAGE,
+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ VSYSCALL_HPET,
+ FIX_HPET_BASE,
+#ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+ FIX_SHARED_INFO,
+ FIX_GNTTAB,
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define NR_FIX_ISAMAPS 256
+ FIX_ISAMAP_END,
+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+extern void __set_fixmap_ma (enum fixed_addresses idx,
+ unsigned long mach, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+
+#define set_fixmap_ma(idx, phys) \
+ __set_fixmap_ma(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma_ro(idx, phys) \
+ __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
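+
+/*
+ * The _ma variants take a machine (host-physical) address rather than a
+ * pseudo-physical one; they are meant for frames owned by Xen itself,
+ * such as the shared-info page and grant-table frames mapped at
+ * FIX_SHARED_INFO and FIX_GNTTAB above.
+ */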
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
+
+#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
+#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
+#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+extern inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way. (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message..
+ */
+ if (idx >= __end_of_fixed_addresses)
+ __this_fixmap_does_not_exist();
+
+ return __fix_to_virt(idx);
+}
+
+#endif
--- /dev/null
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ *
+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
+ */
+#ifndef __ASM_XEN_X86_64_FLOPPY_H
+#define __ASM_XEN_X86_64_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a,s,vdma) \
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
+#include <asm/dma.h>
+#undef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS 0
+#define CROSS_64KB(a,s) (0)
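+/* With MAX_DMA_ADDRESS forced to 0 every buffer fails the generic
+ * driver's ISA DMA reachability test, so it always takes the virtual-DMA
+ * (PIO) paths below -- a Xen guest has no access to the ISA DMA
+ * controller. */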
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value,port) outb_p(value,port)
+
+#define fd_request_dma() (0)
+#define fd_free_dma() ((void)0)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() vdma_get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size) vdma_mem_alloc(size)
+#define fd_dma_mem_free(addr, size) vdma_mem_free(addr, size)
+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+/* Assumed missing from this drop: the I/O port base used by the
+ * virtual-DMA handler below (set in vdma_dma_setup()). */
+static unsigned short virtual_dma_port;
+static int virtual_dma_mode;
+static int doing_pdma;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+ register unsigned char st;
+
+#undef TRACE_FLPY_INT
+
+#ifdef TRACE_FLPY_INT
+ static int calls=0;
+ static int bytes=0;
+ static int dma_wait=0;
+#endif
+ if (!doing_pdma)
+ return floppy_interrupt(irq, dev_id, regs);
+
+#ifdef TRACE_FLPY_INT
+ if(!calls)
+ bytes = virtual_dma_count;
+#endif
+
+ {
+ register int lcount;
+ register char *lptr;
+
+ st = 1;
+ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st=inb(virtual_dma_port+4) & 0xa0 ;
+ if(st != 0xa0)
+ break;
+ if(virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port+5);
+ else
+ *lptr = inb_p(virtual_dma_port+5);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port+4);
+ }
+
+#ifdef TRACE_FLPY_INT
+ calls++;
+#endif
+ if(st == 0x20)
+ return IRQ_HANDLED;
+ if(!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+#ifdef TRACE_FLPY_INT
+ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ virtual_dma_count, virtual_dma_residue, calls, bytes,
+ dma_wait);
+ calls = 0;
+ dma_wait=0;
+#endif
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id, regs);
+ return IRQ_HANDLED;
+ }
+#ifdef TRACE_FLPY_INT
+ if(!virtual_dma_count)
+ dma_wait++;
+#endif
+ return IRQ_HANDLED;
+}
+
+static void fd_disable_dma(void)
+{
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+ return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
+ "floppy", NULL);
+}
+
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+ return (unsigned long) vmalloc(size);
+
+}
+
+static void vdma_mem_free(unsigned long addr, unsigned long size)
+{
+ vfree((void *)addr);
+}
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_pdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
+#define FDC1 xen_floppy_init()
+static int FDC2 = -1;
+
+static int xen_floppy_init(void)
+{
+ use_virtual_dma = 1;
+ can_use_virtual_dma = 1;
+ return 0x340;
+}
+
+/*
+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+ * coincides with another rtc CMOS user. Paul G.
+ */
+#define FLOPPY0_TYPE ({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = (CMOS_READ(0x10) >> 4) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define FLOPPY1_TYPE ({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = CMOS_READ(0x10) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+#define FLOPPY_MOTOR_MASK 0xf0
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_XEN_X86_64_FLOPPY_H */
--- /dev/null
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+/*
+ * Benjamin Liu <benjamin.liu@intel.com>
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Ported to x86-64.
+ *
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+#include <asm-xen/xen-public/xen.h>
+
+#define __syscall_clobber "r11","rcx","memory"
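+
+/*
+ * The hypercall ABI mirrors the x86-64 syscall convention: %rax carries
+ * the hypercall number and arguments travel in %rdi, %rsi, %rdx, %r10
+ * and %r8 -- %r10 standing in for %rcx, which (like %r11) is clobbered
+ * by the transfer instruction, hence __syscall_clobber above.
+ */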
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_trap_table), "D" (table)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_mmu_update), "D" (req), "S" ((long)count),
+ "d" (success_count)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_gdt), "D" (frame_list), "S" ((long)entries)
+ : __syscall_clobber );
+
+
+ return ret;
+}
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_stack_switch), "D" (ss), "S" (esp)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_address, unsigned long failsafe_address,
+ unsigned long syscall_address)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_callbacks), "D" (event_address),
+ "S" (failsafe_address), "d" (syscall_address)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" ((unsigned long)__HYPERVISOR_fpu_taskswitch) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op), "D" ((unsigned long)SCHEDOP_yield)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op), "D" ((unsigned long)SCHEDOP_block)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift)))
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+ void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift)))
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ int ret;
+
+ /* NB. On suspend, control software expects a suspend record in %rsi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift))),
+ "S" (srec)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+/*
+ * On x86-64 the 64-bit timeout would fit in a single hypercall argument,
+ * but it is split into high/low halves to keep the interface common with
+ * the 32-bit port.
+ */
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ unsigned long timeout_hi = timeout >> 32;
+ unsigned long timeout_lo = timeout & 0xffffffff;
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_timer_op),
+ "D" (timeout_hi), "S" (timeout_lo)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ int ret;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_dom0_op), "D" (dom0_op)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_debugreg), "D" ((unsigned long)reg), "S" (value)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_get_debugreg), "D" ((unsigned long)reg)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_update_descriptor), "D" (ma), "S" (word)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_fast_trap(
+ int idx)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_fast_trap), "D" ((unsigned long)idx)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5,%%r10; movq %6,%%r8;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_dom_mem_op), "D" ((unsigned long)op), "S" (extent_list),
+ "d" (nr_extents), "g" ((unsigned long) extent_order), "g" ((unsigned long) DOMID_SELF)
+ : __syscall_clobber,"r8","r10");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_multicall), "D" (call_list), "S" ((unsigned long)nr_calls)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_update_va_mapping),
+ "D" (page_nr), "S" (new_val.pte), "d" (flags)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_event_channel_op), "D" (op)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_xen_version(
+ int cmd)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_xen_version), "D" ((unsigned long)cmd)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_console_io), "D" ((unsigned long)cmd), "S" ((unsigned long)count), "d" (str)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_physdev_op), "D" (physdev_op)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_grant_table_op), "D" ((unsigned long)cmd), "S" ((unsigned long)uop), "d" (count)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5, %%r10;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_update_va_mapping_otherdomain),
+ "D" (page_nr), "S" (new_val.pte), "d" (flags), "g" ((unsigned long)domid)
+ : __syscall_clobber,"r10");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_vm_assist), "D" ((unsigned long)cmd), "S" ((unsigned long)type)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_switch_to_user(void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" ((unsigned long)__HYPERVISOR_switch_to_user) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_boot_vcpu(
+ unsigned long vcpu, full_execution_context_t *ctxt)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" (__HYPERVISOR_boot_vcpu), "D" (vcpu), "S" (ctxt)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_segment_base(
+ int reg, unsigned long value)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_segment_base), "D" ((unsigned long)reg), "S" (value)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+#endif /* __HYPERCALL_H__ */
--- /dev/null
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+#include <asm/fixmap.h>
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
+#else
+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#else
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+/*
+ * Talk about misusing macros..
+ */
+#define __OUT1(s,x) \
+extern inline void out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
+
+#define __IN1(s) \
+extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+
+#define __INS(s) \
+extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+#define IO_SPACE_LIMIT 0xffff
+
+#if defined(__KERNEL__) && __x86_64__
+
+#include <linux/vmalloc.h>
+
+#ifndef __i386__
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are pretty trivial
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+#endif
+
+/*
+ * Change "struct page" to physical address.
+ */
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/mmzone.h>
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+#else
+// #define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+#endif
+
+#include <asm-generic/iomap.h>
+
+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+/*
+ * This one maps high address device memory and turns off caching for that area.
+ * it's useful if some control registers are in such an area and write combining
+ * or read caching is not desirable:
+ */
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+
+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
+#else
+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
+#endif
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline __u8 __readb(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u8 *)addr;
+}
+static inline __u16 __readw(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u16 *)addr;
+}
+static inline __u32 __readl(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u32 *)addr;
+}
+static inline __u64 __readq(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u64 *)addr;
+}
+#define readb(x) __readb(x)
+#define readw(x) __readw(x)
+#define readl(x) __readl(x)
+#define readq(x) __readq(x)
+#define readb_relaxed(a) readb(a)
+#define readw_relaxed(a) readw(a)
+#define readl_relaxed(a) readl(a)
+#define readq_relaxed(a) readq(a)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+#define __raw_readq readq
+
+#define mmiowb()
+
+#ifdef CONFIG_UNORDERED_IO
+static inline void __writel(__u32 val, volatile void __iomem *addr)
+{
+ volatile __u32 __iomem *target = addr;
+ asm volatile("movnti %1,%0"
+ : "=m" (*target)
+ : "r" (val) : "memory");
+}
+
+static inline void __writeq(__u64 val, volatile void __iomem *addr)
+{
+ volatile __u64 __iomem *target = addr;
+ asm volatile("movnti %1,%0"
+ : "=m" (*target)
+ : "r" (val) : "memory");
+}
+#else
+static inline void __writel(__u32 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u32 *)addr = b;
+}
+static inline void __writeq(__u64 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u64 *)addr = b;
+}
+#endif
+static inline void __writeb(__u8 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u8 *)addr = b;
+}
+static inline void __writew(__u16 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u16 *)addr = b;
+}
+#define writeq(val,addr) __writeq((val),(addr))
+#define writel(val,addr) __writel((val),(addr))
+#define writew(val,addr) __writew((val),(addr))
+#define writeb(val,addr) __writeb((val),(addr))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+#define __raw_writeq writeq
+
+void __memcpy_fromio(void*,unsigned long,unsigned);
+void __memcpy_toio(unsigned long,const void*,unsigned);
+
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+{
+ __memcpy_fromio(to,(unsigned long)from,len);
+}
+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+{
+ __memcpy_toio((unsigned long)to,from,len);
+}
+
+void memset_io(volatile void __iomem *a, int b, size_t c);
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, x86-64 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(void __iomem *io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#define flush_write_buffers()
+
+extern int iommu_bio_merge;
+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
+
+#endif /* __KERNEL__ */
+
+#endif
--- /dev/null
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+/* include comes from machine specific directory */
+#include "irq_vectors.h"
+#include <asm/thread_info.h>
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return ((irq == 2) ? 9 : irq);
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
+#endif
+
+#define KDB_VECTOR 0xf9
+
+# define irq_ctx_init(cpu) do { } while (0)
+
+struct irqaction;
+struct pt_regs;
+int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
+
+#endif /* _ASM_IRQ_H */
--- /dev/null
+/*
+ * arch/i386/mach-generic/io_ports.h
+ *
+ * Machine specific IO port address definition for generic.
+ * Written by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_IO_PORTS_H
+#define _MACH_IO_PORTS_H
+
+/* i8253A PIT registers */
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+#define PIT_CH2 0x42
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD 0x20
+#define PIC_MASTER_IMR 0x21
+#define PIC_MASTER_ISR PIC_MASTER_CMD
+#define PIC_MASTER_POLL PIC_MASTER_ISR
+#define PIC_MASTER_OCW3 PIC_MASTER_ISR
+#define PIC_SLAVE_CMD 0xa0
+#define PIC_SLAVE_IMR 0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR 2
+#define MASTER_ICW4_DEFAULT 0x01
+#define SLAVE_ICW4_DEFAULT 0x01
+#define PIC_ICW4_AEOI 2
+
+#endif /* !_MACH_IO_PORTS_H */
--- /dev/null
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ * FIRST_EXTERNAL_VECTOR:
+ * The first free place for external interrupts
+ *
+ * SYSCALL_VECTOR:
+ * The IRQ vector under which a syscall makes the user-to-kernel
+ * transition.
+ *
+ * TIMER_IRQ:
+ * The IRQ number the timer interrupt comes in at.
+ *
+ * NR_IRQS:
+ * The total number of interrupt vectors (including all the
+ * architecture specific interrupts) needed.
+ *
+ */
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+
+#define SYSCALL_VECTOR 0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+#if 0
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * some of the following vectors are 'rare', they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR 0xff
+#define ERROR_APIC_VECTOR 0xfe
+#define INVALIDATE_TLB_VECTOR 0xfd
+#define RESCHEDULE_VECTOR 0xfc
+#define CALL_FUNCTION_VECTOR 0xfb
+
+#define THERMAL_APIC_VECTOR 0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR 0xef
+#endif
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR 0x31
+#define FIRST_SYSTEM_VECTOR 0xef
+
+/*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+#define NR_IPIS 8
+
+#define RESCHEDULE_VECTOR 1
+#define INVALIDATE_TLB_VECTOR 2
+#define CALL_FUNCTION_VECTOR 3
+
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+
+#define FPU_IRQ 13
+
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+
+/*
+ * The flat IRQ space is divided into two regions:
+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
+ * if we have physical device-access privilege. This region is at the
+ * start of the IRQ space so that existing device drivers do not need
+ * to be modified to translate physical IRQ numbers into our IRQ space.
+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ * are bound using the provided bind/unbind functions.
+ */
+
+#define PIRQ_BASE 0
+#define NR_PIRQS 256
+
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS 256
+
+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
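+
+/* For example, with the bases above a physical IRQ keeps its number
+ * (pirq_to_irq(14) == 14), while the first dynamically bound event
+ * channel maps to dynirq_to_irq(0) == 256. */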
+
+#ifndef __ASSEMBLY__
+/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+extern int bind_virq_to_irq(int virq);
+extern void unbind_virq_from_irq(int virq);
+extern int bind_ipi_on_cpu_to_irq(int cpu, int ipi);
+extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
+extern int bind_evtchn_to_irq(int evtchn);
+extern void unbind_evtchn_from_irq(int evtchn);
+
+extern void irq_suspend(void);
+extern void irq_resume(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_VECTORS_H */
--- /dev/null
+/*
+ * include/asm-i386/mach-default/mach_time.h
+ *
+ * Machine specific set RTC function for generic.
+ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TIME_H
+#define _MACH_TIME_H
+
+#include <linux/mc146818rtc.h>
+
+/* for check timing call set_rtc_mmss() 500ms */
+/* used in arch/i386/time.c::do_timer_interrupt() */
+#define USEC_AFTER 500000
+#define USEC_BEFORE 500000
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you'll only notice that after reboot!
+ */
+static inline int mach_set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ BCD_TO_BIN(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ } else {
+ printk(KERN_WARNING
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ return retval;
+}
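+
+/*
+ * Worked example of the half-hour correction above (illustrative values):
+ * if the RTC reads cmos_minutes == 50 while nowtime yields
+ * real_minutes == 22 (a +30-minute zone offset), then
+ * (abs(22 - 50) + 15) / 30 == 1 is odd, so real_minutes becomes 52;
+ * abs(52 - 50) < 30, so only the offset within the half hour is written
+ * back and the zone offset is preserved.
+ */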
+
+static inline unsigned long mach_get_cmos_time(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ int i;
+
+ /* The Linux interpretation of the CMOS clock register contents:
+ * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+ * RTC registers show the second which has precisely just started.
+ * Let's hope other operating systems interpret the RTC the same way.
+ */
+ /* read RTC exactly on falling edge of update flag */
+ for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ break;
+ for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
+ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ break;
+	do { /* Isn't this overkill? UIP above should guarantee consistency. */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ {
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+ }
+ if ((year += 1900) < 1970)
+ year += 100;
+
+ return mktime(year, mon, day, hour, min, sec);
+}
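+
+/*
+ * Note on the year windowing above: the RTC stores a two-digit year, so
+ * e.g. a register value of 5 gives 1905 after adding 1900, which is below
+ * the 1970 epoch and is bumped by 100 to 2005; values 70..99 map to
+ * 1970..1999 unchanged.
+ */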
+
+#endif /* !_MACH_TIME_H */
--- /dev/null
+/*
+ * include/asm-i386/mach-default/mach_timer.h
+ *
+ * Machine specific calibrate_tsc() for generic.
+ * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC -------
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _MACH_TIMER_H
+#define _MACH_TIMER_H
+
+#define CALIBRATE_LATCH (5 * LATCH)
+
+static inline void mach_prepare_counter(void)
+{
+ /* Set the Gate high, disable speaker */
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ /*
+ * Now let's take care of CTC channel 2
+ *
+ * Set the Gate high, program CTC channel 2 for mode 0,
+ * (interrupt on terminal count mode), binary count,
+ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+ *
+ * Some devices need a delay here.
+ */
+ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
+ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
+ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+ unsigned long count = 0;
+ do {
+ count++;
+ } while ((inb_p(0x61) & 0x20) == 0);
+ *count_p = count;
+}
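+
+/*
+ * A minimal calibration sketch (hypothetical caller, not part of this
+ * patch): the two helpers are used together to measure how many TSC
+ * cycles elapse while PIT channel 2 counts down CALIBRATE_LATCH ticks
+ * (5 * LATCH, i.e. 50 ms at HZ == 100):
+ *
+ *	unsigned long count, start_low, start_high, end_low, end_high;
+ *	mach_prepare_counter();
+ *	rdtsc(start_low, start_high);
+ *	mach_countup(&count);		// spins until OUT2 goes high
+ *	rdtsc(end_low, end_high);
+ *
+ * The TSC delta over that 50 ms window then gives the CPU clock rate.
+ */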
+
+#endif /* !_MACH_TIMER_H */
--- /dev/null
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ *
+ * Description:
+ * This is included late in kernel/setup.c so that it can make
+ * use of all of the static functions.
+ **/
+
+static char * __init machine_specific_memory_setup(void)
+{
+ char *who;
+ unsigned long start_pfn, max_pfn;
+
+ who = "Xen";
+
+ start_pfn = 0;
+ max_pfn = xen_start_info.nr_pages;
+
+ e820.nr_map = 0;
+ add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
+
+ return who;
+}
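+
+/*
+ * For example, a domain started with xen_start_info.nr_pages == 65536
+ * ends up with a single E820_RAM region covering pseudophysical
+ * addresses [0, 65536 << PAGE_SHIFT), i.e. the first 256MB.
+ */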
+
+void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
+{
+ clear_bit(X86_FEATURE_VME, c->x86_capability);
+ clear_bit(X86_FEATURE_DE, c->x86_capability);
+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
+ clear_bit(X86_FEATURE_PGE, c->x86_capability);
+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ clear_bit(X86_FEATURE_MTRR, c->x86_capability);
+}
+
+extern void hypervisor_callback(void);
+extern void failsafe_callback(void);
+
+static void __init machine_specific_arch_setup(void)
+{
+ HYPERVISOR_set_callbacks(
+ (unsigned long) hypervisor_callback,
+ (unsigned long) failsafe_callback,
+ (unsigned long) system_call);
+
+ machine_specific_modify_cpu_capabilities(&boot_cpu_data);
+}
--- /dev/null
+/* Hook to call BIOS initialisation function */
+
+#define ARCH_SETUP machine_specific_arch_setup();
+
+static void __init machine_specific_arch_setup(void);
--- /dev/null
+/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
+ * which needs to alter them. */
+
+static inline void smpboot_clear_io_apic_irqs(void)
+{
+#if 1
+ printk("smpboot_clear_io_apic_irqs\n");
+#else
+ io_apic_irqs = 0;
+#endif
+}
+
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+#if 1
+ printk("smpboot_setup_warm_reset_vector\n");
+#else
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ Dprintk("1.\n");
+ *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+ Dprintk("2.\n");
+ *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+ Dprintk("3.\n");
+#endif
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ CMOS_WRITE(0, 0xf);
+
+ *((volatile long *) phys_to_virt(0x467)) = 0;
+}
+
+static inline void smpboot_setup_io_apic(void)
+{
+#if 1
+ printk("smpboot_setup_io_apic\n");
+#else
+ /*
+ * Here we can be sure that there is an IO-APIC in the system. Let's
+ * go and set it up:
+ */
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+#endif
+}
+
+
+#define smp_found_config (HYPERVISOR_shared_info->n_vcpu > 1)
--- /dev/null
+#ifndef __X86_64_MMU_CONTEXT_H
+#define __X86_64_MMU_CONTEXT_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/pda.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * possibly do the LDT unload here?
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+#ifdef CONFIG_SMP
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (read_pda(mmu_state) == TLBSTATE_OK)
+ write_pda(mmu_state, TLBSTATE_LAZY);
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+#endif
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ clear_bit(cpu, &prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ write_pda(mmu_state, TLBSTATE_OK);
+ write_pda(active_mm, next);
+#endif
+ set_bit(cpu, &next->cpu_vm_mask);
+ load_cr3(next->pgd);
+ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
+ if (unlikely(next->context.ldt != prev->context.ldt))
+ load_LDT_nolock(&next->context, cpu);
+ }
+#ifdef CONFIG_SMP
+ else {
+ write_pda(mmu_state, TLBSTATE_OK);
+ if (read_pda(active_mm) != next)
+ out_of_line_bug();
+ if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
+ load_cr3(next->pgd);
+ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
+ load_LDT_nolock(&next->context, cpu);
+ }
+ }
+#endif
+}
+
+#define deactivate_mm(tsk,mm) do { \
+ load_gs_index(0); \
+ asm volatile("movl %0,%%fs"::"r"(0)); \
+} while(0)
+
+#define activate_mm(prev, next) do { \
+ switch_mm((prev),(next),NULL); \
+ flush_page_update_queue(); \
+} while (0)
+
+#endif
--- /dev/null
+#ifndef _X86_64_PAGE_H
+#define _X86_64_PAGE_H
+
+#include <linux/config.h>
+/* #include <linux/string.h> */
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/foreign_page.h>
+
+#define arch_free_page(_page,_order) \
+({ int foreign = PageForeign(_page); \
+ if (foreign) \
+ (PageForeignDestructor(_page))(_page); \
+ foreign; \
+})
+#define HAVE_ARCH_FREE_PAGE
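+
+/*
+ * Sketch of the hook above: the page allocator invokes arch_free_page()
+ * first; for a foreign page (e.g. one temporarily mapped from another
+ * domain) the registered destructor runs instead of the normal free
+ * path, and the non-zero return tells the caller the page has already
+ * been dealt with.
+ */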
+
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (0x1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
+
+#define THREAD_ORDER 1
+#ifdef __ASSEMBLY__
+#define THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER))
+#else
+#define THREAD_SIZE (1UL << (PAGE_SHIFT + THREAD_ORDER))
+#endif
+#define CURRENT_MASK (~(THREAD_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+void clear_page(void *);
+void copy_page(void *, void *);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern u32 *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) ((unsigned long) phys_to_machine_mapping[(unsigned int)(_pfn)])
+#define mfn_to_pfn(_mfn) ((unsigned long) machine_to_phys_mapping[(unsigned int)(_mfn)])
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
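+
+/*
+ * Illustrative round trip (hypothetical frame numbers): if
+ * pseudophysical frame 0x1234 is backed by machine frame 0xabcd, then
+ * for phys == (0x1234 << PAGE_SHIFT) | 0x123:
+ *
+ *	phys_to_machine(phys) == (0xabcd << PAGE_SHIFT) | 0x123
+ *	machine_to_phys(phys_to_machine(phys)) == phys
+ *
+ * Only the frame number is translated; the in-page offset passes through
+ * unchanged.
+ */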
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define PTE_MASK PHYSICAL_PAGE_MASK
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) (((x).pte & 1) ? machine_to_phys((x).pte) : \
+ (x).pte)
+
+static inline unsigned long pmd_val(pmd_t x)
+{
+ unsigned long ret = x.pmd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+
+static inline unsigned long pud_val(pud_t x)
+{
+ unsigned long ret = x.pud;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+
+static inline unsigned long pgd_val(pgd_t x)
+{
+ unsigned long ret = x.pgd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte_ma(x) ((pte_t) { (x) } )
+
+static inline pte_t __pte(unsigned long x)
+{
+ if (x & 1) x = phys_to_machine(x);
+ return ((pte_t) { (x) });
+}
+
+static inline pmd_t __pmd(unsigned long x)
+{
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pmd_t) { (x) });
+}
+
+static inline pud_t __pud(unsigned long x)
+{
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pud_t) { (x) });
+}
+
+static inline pgd_t __pgd(unsigned long x)
+{
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pgd_t) { (x) });
+}
+
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long vm_stack_flags, vm_stack_flags32;
+extern unsigned long vm_data_default_flags, vm_data_default_flags32;
+extern unsigned long vm_force_exec32;
+
+#define __START_KERNEL 0xffffffff80100000UL
+#define __START_KERNEL_map 0xffffffff80000000UL
+#define __PAGE_OFFSET 0xffff880000000000UL
+
+#else
+#define __START_KERNEL 0xffffffff80100000
+#define __START_KERNEL_map 0xffffffff80000000
+#define __PAGE_OFFSET 0xffff880000000000
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT 46
+#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK_SHIFT 48
+#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+#define KERNEL_TEXT_SIZE (40UL*1024*1024)
+#define KERNEL_TEXT_START 0xffffffff80000000UL
+
+#ifndef __ASSEMBLY__
+
+#include <asm/bug.h>
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
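+
+/*
+ * Examples with PAGE_SHIFT == 12: get_order(1) == 0, get_order(4096) == 0,
+ * get_order(4097) == 1, get_order(8192) == 1, get_order(32768) == 3.
+ */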
+
+#endif /* __ASSEMBLY__ */
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+
+/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
+ Otherwise you risk miscompilation. */
+#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
+/* __pa_symbol should be used for C visible symbols.
+ This seems to be the official gcc blessed way to do such arithmetic. */
+#define __pa_symbol(x) \
+ ({unsigned long v; \
+ asm("" : "=r" (v) : "0" (x)); \
+ __pa(v); })
+
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn) (mem_map + (pfn))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define __HAVE_ARCH_GATE_AREA 1
+
+#endif /* __KERNEL__ */
+
+#endif /* _X86_64_PAGE_H */
--- /dev/null
+#ifndef _ASMx86_64_PARAM_H
+#define _ASMx86_64_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ 100 /* Internal kernel timer frequency */
+# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
--- /dev/null
+#ifndef __x8664_PCI_H
+#define __x8664_PCI_H
+
+#include <linux/config.h>
+#include <asm/io.h>
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h> /* for struct page */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses() 0
+#endif
+#define pcibios_scan_all_fns(a, b) 0
+
+extern int no_iommu, force_iommu;
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM (pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+extern int iommu_setup(char *opt);
+
+#ifdef CONFIG_GART_IOMMU
+/* The PCI address space does equal the physical memory
+ * address space. The networking and block device layers use
+ * this boolean for bounce buffer decisions
+ *
+ * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
+ * that an IOMMU is available.
+ */
+#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
+
+/*
+ * x86-64 always supports DAC, but sometimes it is useful to force
+ * devices through the IOMMU to get automatic sg list merging.
+ * Optional right now.
+ */
+extern int iommu_sac_force;
+#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force)
+
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+
+#else
+/* No IOMMU */
+
+#define PCI_DMA_BUS_IS_PHYS 1
+#define pci_dac_dma_supported(pci_dev, mask) 1
+
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+#endif
+
+#include <asm-generic/pci-dma-compat.h>
+
+static inline dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+ return ((dma64_addr_t) page_to_phys(page) +
+ (dma64_addr_t) offset);
+}
+
+static inline struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return virt_to_page(__va(dma_addr));
+}
+
+static inline unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return (dma_addr & ~PAGE_MASK);
+}
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+ flush_write_buffers();
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* generic pci stuff */
+#ifdef CONFIG_PCI
+#include <asm-generic/pci.h>
+#endif
+
+/* On Xen we have to scan all functions since Xen hides bridges from
+ * us. If a bridge is at fn=0 and that slot has a multifunction
+ * device, we won't find the additional devices without scanning all
+ * functions. */
+#undef pcibios_scan_all_fns
+#define pcibios_scan_all_fns(a, b) 1
+
+#endif /* __x8664_PCI_H */
--- /dev/null
+#ifndef X86_64_PDA_H
+#define X86_64_PDA_H
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/cache.h>
+
+/* Per-processor data structure. %gs points to it while the kernel runs */
+struct x8664_pda {
+ struct task_struct *pcurrent; /* Current process */
+ unsigned long data_offset; /* Per cpu data offset from linker address */
+ struct x8664_pda *me; /* Pointer to itself */
+ unsigned long kernelstack; /* top of kernel stack for current */
+ unsigned long oldrsp; /* user rsp for system call */
+ unsigned long irqrsp; /* Old rsp for interrupts. */
+ int irqcount; /* Irq nesting counter. Starts with -1 */
+ int cpunumber; /* Logical CPU number */
+ char *irqstackptr; /* top of irqstack */
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* number of NMI on this CPUs */
+ unsigned long idle_timestamp;
+ struct mm_struct *active_mm;
+ int mmu_state;
+ unsigned apic_timer_irqs;
+ int kernel_mode; /* kernel or user mode */
+} ____cacheline_aligned;
+
+
+#define IRQSTACK_ORDER 2
+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+
+extern struct x8664_pda cpu_pda[];
+
+/*
+ * There is no fast way to get the base address of the PDA; all accesses
+ * have to go through %gs. So it needs to be done this Torvaldian way.
+ */
+#define sizeof_field(type,field) (sizeof(((type *)0)->field))
+#define typeof_field(type,field) typeof(((type *)0)->field)
+
+extern void __bad_pda_field(void);
+
+#define pda_offset(field) offsetof(struct x8664_pda, field)
+
+#define pda_to_op(op,field,val) do { \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+case 2: \
+asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+case 4: \
+asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+case 8: \
+asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+ default: __bad_pda_field(); \
+ } \
+ } while (0)
+
+/*
+ * AK: PDA read accesses should neither be volatile nor have a memory clobber.
+ * Unfortunately, removing them causes all hell to break loose currently.
+ */
+#define pda_from_op(op,field) ({ \
+ typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+case 2: \
+asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+case 4: \
+asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+case 8: \
+asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+ default: __bad_pda_field(); \
+ } \
+ ret__; })
+
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
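+
+/*
+ * Usage sketch (hypothetical callers): each accessor expands to a single
+ * %gs-relative instruction of the field's width, e.g.
+ *
+ *	struct task_struct *t = read_pda(pcurrent);	// movq %gs:0,...
+ *	write_pda(mmu_state, TLBSTATE_OK);
+ *	add_pda(__nmi_count, 1);
+ *
+ * The sizeof switch selects the w/l/q suffix; an unsupported field size
+ * fails at link time through the undefined __bad_pda_field().
+ */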
+
+#endif
+
+#define PDA_STACKOFFSET (5*8)
+
+#endif
--- /dev/null
+#ifndef _X86_64_PGALLOC_H
+#define _X86_64_PGALLOC_H
+
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/pda.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
+
+void make_page_readonly(void *va);
+void make_page_writable(void *va);
+void make_pages_readonly(void *va, unsigned int nr);
+void make_pages_writable(void *va, unsigned int nr);
+
+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
+ flush_page_update_queue();
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+{
+ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
+ flush_page_update_queue();
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ flush_page_update_queue();
+}
+
+/*
+ * We need to use batch mode here, but pgd_populate() won't be
+ * called frequently.
+ */
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
+ flush_page_update_queue();
+}
+
+extern __inline__ pmd_t *get_pmd(void)
+{
+ pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+ if (!pmd)
+ return NULL;
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+ flush_page_update_queue();
+ return pmd;
+}
+
+extern __inline__ void pmd_free(pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ xen_pmd_unpin(__pa(pmd));
+ make_page_writable(pmd);
+ flush_page_update_queue();
+ free_page((unsigned long)pmd);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pmd)
+ return NULL;
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+ flush_page_update_queue();
+ return pmd;
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pud)
+ return NULL;
+ make_page_readonly(pud);
+ xen_pud_pin(__pa(pud));
+ flush_page_update_queue();
+ return pud;
+}
+
+static inline void pud_free(pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ xen_pud_unpin(__pa(pud));
+ make_page_writable(pud);
+ flush_page_update_queue();
+ free_page((unsigned long)pud);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ /*
+ * We allocate two contiguous pages for kernel and user.
+ */
+ unsigned boundary;
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
+
+ if (!pgd)
+ return NULL;
+ /*
+ * Copy kernel pointers in from init.
+ * Could keep a freelist or slab cache of those because the kernel
+ * part never changes.
+ */
+ boundary = pgd_index(__PAGE_OFFSET);
+ memset(pgd, 0, boundary * sizeof(pgd_t));
+ memcpy(pgd + boundary,
+ init_level4_pgt + boundary,
+ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
+
+ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
+ make_pages_readonly(pgd, 2);
+
+ xen_pgd_pin(__pa(pgd)); /* kernel */
+ xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */
+ /*
+ * Set level3_user_pgt for vsyscall area
+ */
+ set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ flush_page_update_queue();
+ return pgd;
+}
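+
+/*
+ * Resulting layout (sketch): pgd_alloc() returns one order-1 allocation
+ * holding two page-sized tables,
+ *
+ *	pgd ................ kernel pgd (page 0)
+ *	__user_pgd(pgd) .... user pgd   (page 1, pgd + PTRS_PER_PGD)
+ *
+ * both pinned read-only by Xen. switch_mm() later loads the kernel half
+ * via load_cr3() and the user half via xen_new_user_pt().
+ */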
+
+static inline void pgd_free(pgd_t *pgd)
+{
+ BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ xen_pgd_unpin(__pa(pgd));
+ xen_pgd_unpin(__pa(__user_pgd(pgd)));
+ make_pages_writable(pgd, 2);
+ flush_page_update_queue();
+ free_pages((unsigned long)pgd, 1);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pte)
+ return NULL;
+ make_page_readonly(pte);
+ xen_pte_pin(__pa(pte));
+ flush_page_update_queue();
+ return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pte)
+ return NULL;
+ make_page_readonly(pte);
+ xen_pte_pin(__pa(pte));
+ flush_page_update_queue();
+ return virt_to_page((unsigned long)pte);
+}
+
+/* Should really implement gc for free page table pages. This could be
+ done with a reference count in struct page. */
+
+extern __inline__ void pte_free_kernel(pte_t *pte)
+{
+ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
+ xen_pte_unpin(__pa(pte));
+ make_page_writable(pte);
+ flush_page_update_queue();
+ free_page((unsigned long)pte);
+}
+
+extern void pte_free(struct page *pte);
+
+//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
+#define __pte_free_tlb(tlb,x) pte_free((x))
+#define __pmd_free_tlb(tlb,x) pmd_free((x))
+#define __pud_free_tlb(tlb,x) pud_free((x))
+
+#endif /* _X86_64_PGALLOC_H */
--- /dev/null
+#ifndef _X86_64_PGTABLE_H
+#define _X86_64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the x86-64 page table tree.
+ *
+ * x86-64 has a 4-level page table setup. Generic Linux MM only supports
+ * three levels. The fourth level is currently a single static page that
+ * is shared by everybody and just contains a pointer to the current
+ * three-level page setup at the beginning and some kernel mappings at
+ * the end. For more details see Documentation/x86_64/mm.txt.
+ */
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <linux/threads.h>
+#include <asm/pda.h>
+#include <asm-xen/hypervisor.h>
+extern pud_t level3_user_pgt[512];
+extern pud_t init_level4_pgt[];
+extern pud_t init_level4_user_pgt[];
+extern unsigned long __supported_pte_mask;
+
+#define swapper_pg_dir NULL
+
+extern int nonx_setup(char *str);
+extern void paging_init(void);
+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
+
+extern unsigned long pgkern_mask;
+
+#define arbitrary_virt_to_machine(__va) ({0;})
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define PGDIR_SHIFT 39
+#define PTRS_PER_PGD 512
+
+/*
+ * PUD_SHIFT determines what a top-level page table entry can map
+ */
+#define PUD_SHIFT 30
+#define PTRS_PER_PUD 512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
+
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+
+#define pgd_none(x) (!pgd_val(x))
+#define pud_none(x) (!pud_val(x))
+
+#define set_pte_batched(pteptr, pteval) \
+ queue_l1_entry_update(pteptr, (pteval))
+
+extern inline int pud_present(pud_t pud) { return !pud_none(pud); }
+
+#ifdef CONFIG_SMP
+#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte)
+
+#else
+#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval.pte))
+#if 0
+static inline void set_pte(pte_t *dst, pte_t val)
+{
+ *dst = val;
+}
+#endif
+#endif
+
+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval).pmd)
+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval).pud)
+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval).pgd)
+
+extern inline void pud_clear (pud_t * pud)
+{
+ set_pud(pud, __pud(0));
+}
+
+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
+
+extern inline void pgd_clear (pgd_t * pgd)
+{
+ set_pgd(pgd, __pgd(0));
+ set_pgd(__user_pgd(pgd), __pgd(0));
+}
+
+#define pud_page(pud) \
+ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because Xen Linux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+ pte_t pte = *xp;
+ if (pte.pte)
+ set_pte(xp, __pte_ma(0));
+ return pte;
+}
+
+#define pte_same(a, b) ((a).pte == (b).pte)
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#ifndef __ASSEMBLY__
+#define MAXMEM 0x3fffffffffffUL
+#define VMALLOC_START 0xffffc20000000000UL
+#define VMALLOC_END 0xffffe1ffffffffffUL
+#define MODULES_VADDR 0xffffffff88000000UL
+#define MODULES_END 0xfffffffffff00000UL
+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 2MB page */
+#define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
+
+#define _PAGE_PROTNONE 0x080 /* If not present */
+#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE _PAGE_TABLE
+
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY PAGE_COPY_NOEXEC
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER )
+#define __PAGE_KERNEL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+#define __PAGE_KERNEL_RO \
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+#define __PAGE_KERNEL_VSYSCALL \
+	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED )
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
+	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD )
+#define __PAGE_KERNEL_LARGE \
+ (__PAGE_KERNEL | _PAGE_PSE | _PAGE_USER )
+
+
+/*
+ * We don't support GLOBAL page in xenolinux64
+ */
+#define MAKE_GLOBAL(x) __pgprot((x))
+
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+/* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+static inline unsigned long pgd_bad(pgd_t pgd)
+{
+ unsigned long val = pgd_val(pgd);
+ val &= ~PTE_MASK;
+ val &= ~(_PAGE_USER | _PAGE_DIRTY);
+ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+}
+
+static inline unsigned long pud_bad(pud_t pud)
+{
+ unsigned long val = pud_val(pud);
+ val &= ~PTE_MASK;
+ val &= ~(_PAGE_USER | _PAGE_DIRTY);
+ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+}
+
+#define pte_none(x) (!(x).pte)
+#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
+ */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_pfn(_pte) \
+({ \
+ unsigned long mfn = (_pte).pte >> PAGE_SHIFT; \
+ unsigned pfn = mfn_to_pfn(mfn); \
+ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ pfn = max_mapnr; /* special: force !pfn_valid() */ \
+ pfn; \
+})
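+
+/*
+ * Example of the guard above: a PTE that maps a foreign or I/O machine
+ * frame has either mfn_to_pfn(mfn) >= max_mapnr or pfn_to_mfn(pfn) != mfn,
+ * so pte_pfn() returns max_mapnr, pfn_valid() fails, and no 'struct page'
+ * is ever derived for such a mapping.
+ */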
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+
+ (pte).pte = (pfn_to_mfn(page_nr) << PAGE_SHIFT);
+ (pte).pte |= pgprot_val(pgprot);
+ (pte).pte &= __supported_pte_mask;
+ return pte;
+}
+
+#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define __pte_val(x) ((x).pte)
+
+static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+extern inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+extern inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+extern inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
+extern inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
+static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
+
+extern inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
+extern inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_dirty(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkclean(pte).pte);
+ return ret;
+}
+
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_young(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkold(pte).pte);
+ return ret;
+}
+
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (pte_write(pte))
+ set_pte(ptep, pte_wrprotect(pte));
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ xen_l1_entry_update(ptep, pte_mkdirty(pte).pte);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
+static inline int pmd_large(pmd_t pte) {
+ return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
+}
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+/*
+ * Level 4 access.
+ * Never use these in the common code.
+ */
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
+#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+
+/* PUD - Level3 access */
+/* to find an entry in a page-table-directory. */
+#define pud_index(address) ((address >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
+static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
+{
+ return pud + pud_index(address);
+}
+
+/* Find the correct pud via the hidden fourth page-table level: */
+
+/* This accesses the reference page table of the boot cpu.
+ Other CPUs get synced lazily via the page fault handler. */
+static inline pud_t *pud_offset_k(unsigned long address)
+{
+ unsigned long addr;
+
+ addr = pud_val(init_level4_pgt[pud_index(address)]);
+ addr &= PHYSICAL_PAGE_MASK; /* machine physical */
+ addr = machine_to_phys(addr);
+ return __pud_offset_k((pud_t *)__va(addr), address);
+}
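+
+/*
+ * Note the extra step relative to native x86-64: the level-4 entry holds
+ * a machine address, so it must pass through machine_to_phys() before
+ * __va() can yield a usable kernel-virtual pointer to the pud page.
+ */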
+
+/* PMD - Level 2 access */
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
+ pmd_index(address))
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) ((pmd_val(x) & ~PTE_MASK) != _KERNPG_TABLE )
+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+
+#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
+
+/* PTE - Level 1 access. */
+
+/* page, protection -> pte */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
+
+/* physical address -> PTE */
+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+{
+ pte_t pte;
+ (pte).pte = physpage | pgprot_val(pgprot);
+ return pte;
+}
+
+/* Change flags of a PTE */
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ (pte).pte &= _PAGE_CHG_MASK;
+ (pte).pte |= pgprot_val(newprot);
+ (pte).pte &= __supported_pte_mask;
+ return pte;
+}
+
+#define pte_index(address) \
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
+ pte_index(address))
+
+/* x86-64 always has all page tables mapped. */
+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_unmap(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */
+
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+
+/* We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time. */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ if (__dirty) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ } while (0)
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x) (((x).val >> 1) & 0x3f)
+#define __swp_offset(x) ((x).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
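+
+/*
+ * Worked example of the encoding above: bit 0 stays clear so a swap
+ * entry is never _PAGE_PRESENT; bits 1..6 hold the type, bits 8 and up
+ * the offset:
+ *
+ *	swp_entry_t e = __swp_entry(3, 0x1000);
+ *	e.val            == (3 << 1) | (0x1000 << 8) == 0x100006
+ *	__swp_type(e)    == 3
+ *	__swp_offset(e)  == 0x1000
+ */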
+
+#endif /* !__ASSEMBLY__ */
+
+extern int kern_addr_valid(unsigned long addr);
+
+#define DOMID_LOCAL (0xFFFFU)
+
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid);
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v);
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+ remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#define pgtable_cache_init() do { } while (0)
+#define check_pgt_cache() do { } while (0)
+
+#define PAGE_AGP PAGE_KERNEL_NOCACHE
+#define HAVE_PAGE_AGP 1
+
+/* fs/proc/kcore.c */
+#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
+#define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _X86_64_PGTABLE_H */
--- /dev/null
+/*
+ * include/asm-x86_64/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_X86_64_PROCESSOR_H
+#define __ASM_X86_64_PROCESSOR_H
+
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/msr.h>
+#include <asm/current.h>
+#include <asm/system.h>
+#include <asm/mmsegment.h>
+#include <asm/percpu.h>
+#include <linux/personality.h>
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
+#define ID_MASK 0x00200000
+
+#define desc_empty(desc) \
+ (!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ __u32 x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int x86_cache_size; /* in KB */
+ int x86_clflush_size;
+ int x86_cache_alignment;
+ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
+ __u8 x86_virt_bits, x86_phys_bits;
+ __u8 x86_num_cores;
+ __u8 x86_apicid;
+ __u32 x86_power;
+ __u32 x86_cpuid_level; /* Max CPUID function supported */
+ unsigned long loops_per_jiffy;
+} ____cacheline_aligned;
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NUM 8
+#define X86_VENDOR_UNKNOWN 0xff
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_irq13;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern void dodgy_tsc(void);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ switch (mask) {
+ case X86_CR4_OSFXSR:
+ case X86_CR4_OSXMMEXCPT:
+ break;
+ default:
+ do {
+ const char *msg = "Xen unsupported cr4 update\n";
+ (void)HYPERVISOR_console_io(
+ CONSOLEIO_write, __builtin_strlen(msg),
+ (char *)msg);
+ BUG();
+ } while (0);
+ }
+}
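+
+/*
+ * Usage note (sketch): only the FPU bits may be enabled under Xen, e.g.
+ * set_in_cr4(X86_CR4_OSFXSR) during FPU setup; any other bit (say
+ * X86_CR4_PSE) writes "Xen unsupported cr4 update" to the hypervisor
+ * console and hits BUG().
+ */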
+
+#define load_cr3(pgdir) do { \
+ xen_pt_switch(__pa(pgdir)); \
+ per_cpu(cur_pgd, smp_processor_id()) = pgdir; \
+} while (/* CONSTCOND */0)
+
+/*
+ * Bus types
+ */
+#define MCA_bus 0
+#define MCA_bus__is_a_macro
+
+
+/*
+ * User space process size. 47bits.
+ */
+#define TASK_SIZE (0x800000000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
+#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
+#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
+#define TASK_UNMAPPED_BASE \
+ (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_fxsave_struct {
+ u16 cwd;
+ u16 swd;
+ u16 twd;
+ u16 fop;
+ u64 rip;
+ u64 rdp;
+ u32 mxcsr;
+ u32 mxcsr_mask;
+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
+ u32 padding[24];
+} __attribute__ ((aligned (16)));
+
+union i387_union {
+ struct i387_fxsave_struct fxsave;
+};
+
+struct tss_struct {
+ u32 reserved1;
+ u64 rsp0;
+ u64 rsp1;
+ u64 rsp2;
+ u64 reserved2;
+ u64 ist[7];
+ u32 reserved3;
+ u32 reserved4;
+ u16 reserved5;
+ u16 io_bitmap_base;
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+ * bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit. Thus we have:
+ *
+ * 128 bytes, the bitmap itself, for ports 0..0x3ff
+ * 8 bytes, for an extra "long" of ~0UL
+ */
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+} __attribute__((packed)) ____cacheline_aligned;
+
+extern struct cpuinfo_x86 boot_cpu_data;
+DECLARE_PER_CPU(struct tss_struct,init_tss);
+DECLARE_PER_CPU(pgd_t *, cur_pgd);
+
+#define ARCH_MIN_TASKALIGN 16
+
+struct thread_struct {
+ unsigned long rsp0;
+ unsigned long rsp;
+ unsigned long userrsp; /* Copy from PDA */
+ unsigned long fs;
+ unsigned long gs;
+ unsigned int io_pl;
+ unsigned short es, ds, fsindex, gsindex;
+/* Hardware debugging registers */
+ unsigned long debugreg0;
+ unsigned long debugreg1;
+ unsigned long debugreg2;
+ unsigned long debugreg3;
+ unsigned long debugreg6;
+ unsigned long debugreg7;
+/* fault info */
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387 __attribute__((aligned(16)));
+/* IO permissions. The bitmap could be moved into the GDT, which would make
+   switching faster for a limited number of ioperm-using tasks. -AK */
+ int ioperm;
+ unsigned long *io_bitmap_ptr;
+ unsigned io_bitmap_max;
+/* cached TLS descriptors. */
+ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
+} __attribute__((aligned(16)));
+
+#define INIT_THREAD {}
+
+#define INIT_MMAP \
+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+#define EXCEPTION_STACK_ORDER 0
+
+#define start_thread(regs,new_rip,new_rsp) do { \
+ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
+ load_gs_index(0); \
+ (regs)->rip = (new_rip); \
+ (regs)->rsp = (new_rsp); \
+ write_pda(oldrsp, (new_rsp)); \
+ (regs)->cs = __USER_CS; \
+ (regs)->ss = __USER_DS; \
+ (regs)->eflags = 0x200; \
+ set_fs(USER_DS); \
+} while(0)
+
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/*
+ * Return saved PC of a blocked thread.
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ */
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
+
+extern unsigned long get_wchan(struct task_struct *p);
+#define KSTK_EIP(tsk) \
+ (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+
+
+struct microcode_header {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode {
+ struct microcode_header hdr;
+ unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE _IO('6',0)
+
+
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+
+/* Opteron nops */
+#define K8_NOP1 ".byte 0x90\n"
+#define K8_NOP2 ".byte 0x66,0x90\n"
+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
+#define K8_NOP5 K8_NOP3 K8_NOP2
+#define K8_NOP6 K8_NOP3 K8_NOP3
+#define K8_NOP7 K8_NOP4 K8_NOP3
+#define K8_NOP8 K8_NOP4 K8_NOP4
+
+#define ASM_NOP_MAX 8
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+extern inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+/* Stop speculative execution */
+extern inline void sync_core(void)
+{
+ int tmp;
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}
+
+#define cpu_has_fpu 1
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(void *x)
+{
+ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+
+#define ARCH_HAS_PREFETCHW 1
+static inline void prefetchw(void *x)
+{
+ alternative_input(ASM_NOP5,
+ "prefetchw (%1)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+}
+
+#define ARCH_HAS_SPINLOCK_PREFETCH 1
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#define cpu_relax() rep_nop()
+
+/*
+ * NSC/Cyrix CPU configuration register indexes
+ */
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ * NSC/Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax,%ecx,%edx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc8;"
+ : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+}
+
+#define stack_current() \
+({ \
+ struct thread_info *ti; \
+ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->task; \
+})
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern unsigned long boot_option_idle_override;
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
+#endif /* __ASM_X86_64_PROCESSOR_H */
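
rep_nop() and cpu_relax() above exist for busy-wait loops; a user-space sketch of the same PAUSE idiom (not the kernel helpers themselves, just the "rep; nop" encoding they wrap):

	#include <stdatomic.h>

	static atomic_int ready;

	/* Spin until another thread publishes 'ready', issuing "rep; nop"
	 * (PAUSE) each iteration exactly as cpu_relax() does, to ease
	 * pipeline and power pressure inside the wait loop. */
	static void wait_for_ready(void)
	{
		while (!atomic_load_explicit(&ready, memory_order_acquire))
			__asm__ __volatile__("rep; nop" ::: "memory");
	}
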
--- /dev/null
+#ifndef _X86_64_PTRACE_H
+#define _X86_64_PTRACE_H
+
+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
+/* arguments: interrupts/non-tracing syscalls only save up to here */
+#define R11 48
+#define R10 56
+#define R9 64
+#define R8 72
+#define RAX 80
+#define RCX 88
+#define RDX 96
+#define RSI 104
+#define RDI 112
+#define ORIG_RAX 120 /* = ERROR */
+/* end of arguments */
+/* cpu exception frame or undefined in case of fast syscall. */
+#define RIP 128
+#define CS 136
+#define EFLAGS 144
+#define RSP 152
+#define SS 160
+#define ARGOFFSET R11
+#endif /* __ASSEMBLY__ || __FRAME_OFFSETS */
+
+/* top of stack page */
+#define FRAME_SIZE 168
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+#endif
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+/* only useful for accessing 32-bit programs */
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#define user_mode(regs) (!!((regs)->cs & 3))
+#define instruction_pointer(regs) ((regs)->rip)
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+enum {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
+#endif
+
+#endif
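
A hedged sketch of how a trap handler might consume these accessors (the handler and message are hypothetical; user_mode() and instruction_pointer() are as defined above):

	/* Hypothetical fault-path fragment: the saved CS carries the RPL in
	 * its low two bits, so a non-zero value means the trap came from
	 * user mode rather than the kernel. */
	static void report_fault(struct pt_regs *regs)
	{
		if (user_mode(regs))
			printk("user fault at rip %016lx\n",
			       instruction_pointer(regs));
	}
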
--- /dev/null
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#include <asm/cache.h>
+
+#define __KERNEL_CS 0x10
+#define __KERNEL_DS 0x1b
+
+#define __KERNEL32_CS 0x3b
+
+/*
+ * We cannot use the same code segment descriptor for user and kernel
+ * -- not even in long flat mode, because of the different DPL. /kkeil
+ * The segment offset needs to contain an RPL. Grr. -AK
+ * GDT layout to get 64-bit syscall right (sysret hardcodes GDT offsets)
+ */
+
+#define __USER32_CS 0x23 /* 4*8+3 */
+#define __USER_DS 0x2b /* 5*8+3 */
+#define __USER_CS 0x33 /* 6*8+3 */
+#define __USER32_DS __USER_DS
+#define __KERNEL16_CS (GDT_ENTRY_KERNELCS16 * 8)
+#define __KERNEL_COMPAT32_CS 0x8
+
+#define GDT_ENTRY_TLS 1
+#define GDT_ENTRY_TSS 8 /* needs two entries */
+#define GDT_ENTRY_LDT 10
+#define GDT_ENTRY_TLS_MIN 11
+#define GDT_ENTRY_TLS_MAX 13
+/* 14 free */
+#define GDT_ENTRY_KERNELCS16 15
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+
+/* TLS indexes for 64bit - hardcoded in arch_prctl */
+#define FS_TLS 0
+#define GS_TLS 1
+
+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
+
+#define IDT_ENTRIES 256
+#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
+#define GDT_SIZE (GDT_ENTRIES * 8)
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#endif
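
Each selector above encodes (GDT index * 8) | RPL; a standalone sketch checking the constants (the TLS value assumes GDT_ENTRY_TLS_MIN = 11 as defined here):

	#include <assert.h>

	int main(void)
	{
		assert((4 * 8 | 3) == 0x23);   /* __USER32_CS */
		assert((5 * 8 | 3) == 0x2b);   /* __USER_DS   */
		assert((6 * 8 | 3) == 0x33);   /* __USER_CS   */
		assert((11 * 8) + 3 == 0x5b);  /* FS_TLS_SEL = (TLS_MIN+0)*8+3 */
		return 0;
	}
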
--- /dev/null
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+extern int disable_apic;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#include <asm/thread_info.h>
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef ASSEMBLY
+
+#include <asm/pda.h>
+
+struct pt_regs;
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern cpumask_t cpu_online_map;
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern int smp_num_siblings;
+extern void smp_flush_tlb(void);
+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+extern void smp_send_reschedule(int cpu);
+extern void smp_invalidate_rcv(void); /* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings(void);
+void smp_stop_cpu(void);
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern u8 phys_proc_id[NR_CPUS];
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_callin_map;
+#define cpu_possible_map cpu_callout_map
+
+static inline int num_booting_cpus(void)
+{
+ return cpus_weight(cpu_callout_map);
+}
+
+#define __smp_processor_id() read_pda(cpunumber)
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern __inline int hard_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
+#endif
+
+#define safe_smp_processor_id() (disable_apic ? 0 : x86_apicid_to_cpu(hard_smp_processor_id()))
+
+#endif /* !ASSEMBLY */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+#endif
+
+#ifndef ASSEMBLY
+/*
+ * Some low-level functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
+extern u8 bios_cpu_apicid[];
+#ifdef CONFIG_X86_LOCAL_APIC
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+ return cpus_addr(cpumask)[0];
+}
+
+static inline int x86_apicid_to_cpu(u8 apicid)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; ++i)
+ if (x86_cpu_to_apicid[i] == apicid)
+ return i;
+
+ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
+ * or called too early. Either way, we must be CPU 0. */
+ if (x86_cpu_to_apicid[0] == BAD_APICID)
+ return 0;
+
+ return -1;
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < NR_CPUS)
+ return (int)bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
+}
+#endif
+
+#endif /* !ASSEMBLY */
+
+#ifndef CONFIG_SMP
+#define stack_smp_processor_id() 0
+#define safe_smp_processor_id() 0
+#define cpu_logical_map(x) (x)
+#else
+#include <asm/thread_info.h>
+#define stack_smp_processor_id() \
+({ \
+ struct thread_info *ti; \
+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->cpu; \
+})
+#endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_LOCAL_APIC
+static __inline int logical_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+#endif
+
+#endif
+
--- /dev/null
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ *
+ * Copied from asm-xen/asm-i386
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btsl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btrl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btcl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__ (
+ "lock btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "btl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+ return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
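
These locked bitops guard state shared with the hypervisor; a sketch of the usual claim pattern (drain_pending() and handle_event() are hypothetical, the bitops are the ones defined above):

	/* Hypothetical consumer: atomically claim pending bits that Xen or
	 * another CPU may set concurrently; test-and-clear guarantees that
	 * exactly one CPU handles each bit. */
	static void drain_pending(volatile unsigned long *pending, int nr_bits)
	{
		int bit;

		for (bit = 0; bit < nr_bits; bit++)
			if (synch_test_and_clear_bit(bit, pending))
				handle_event(bit);
	}
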
--- /dev/null
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/segment.h>
+#include <asm/synch_bitops.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/arch-x86_64.h>
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
+
+#define __EXTRA_CLOBBER \
+ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
+
+#define switch_to(prev,next,last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
+ "call __switch_to\n\t" \
+ ".globl thread_return\n" \
+ "thread_return:\n\t" \
+ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
+ LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "jc ret_from_fork\n\t" \
+ RESTORE_CONTEXT \
+ : "=a" (last) \
+ : [next] "S" (next), [prev] "D" (prev), \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
+ [tif_fork] "i" (TIF_FORK), \
+ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+ : "memory", "cc" __EXTRA_CLOBBER)
+
+
+extern void load_gs_index(unsigned);
+
+#define __load_gs_index(index) \
+ xen_load_gs((index))
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong.
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %k0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "movl %1,%%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 8\n\t" \
+ ".quad 1b,3b\n" \
+ ".previous" \
+ : :"r" (value), "r" (0))
+
+#define set_debug(value,register) \
+ __asm__("movq %0,%%db" #register \
+ : /* no output */ \
+ :"r" ((unsigned long) value))
+
+
+#ifdef __KERNEL__
+struct alt_instr {
+ __u8 *instr; /* original instruction */
+ __u8 *replacement;
+ __u8 cpuid; /* cpuid bit set for replacement */
+ __u8 instrlen; /* length of original instruction */
+ __u8 replacementlen; /* length of new instruction, <= instrlen */
+ __u8 pad[5];
+};
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr; it can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature) : "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like "(%1)" ... "r")
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature), ##input)
+
+/*
+ * Clear and set the 'TS' bit, respectively.
+ */
+#define clts() __asm__ __volatile__ ("clts")
+
+static inline unsigned long read_cr0(void)
+{
+ BUG();
+}
+
+static inline void write_cr0(unsigned long val)
+{
+ BUG();
+}
+
+static inline unsigned long read_cr3(void)
+{
+ BUG();
+}
+
+static inline unsigned long read_cr4(void)
+{
+ BUG();
+}
+
+static inline void write_cr4(unsigned long val)
+{
+ BUG();
+}
+
+#define stts() write_cr0(8 | read_cr0())
+
+static inline void wbinvd(void)
+{
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_FLUSH_CACHE;
+ (void)HYPERVISOR_mmu_update(&u, 1, NULL);
+}
+
+#endif /* __KERNEL__ */
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+#define __xg(x) ((volatile long *)(x))
+
+extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+ *ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
+ * but generally the primitive is invalid; *ptr is an output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
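
The canonical cmpxchg() pattern is a read-modify-CAS retry loop; a minimal sketch against the macro above (saturating_inc() is hypothetical):

	/* Sketch: lock-free saturating increment. Retry whenever another
	 * CPU changed *v between our read and the compare-and-exchange. */
	static inline unsigned long saturating_inc(volatile unsigned long *v,
						   unsigned long max)
	{
		unsigned long old, new;

		do {
			old = *v;
			new = (old < max) ? old + 1 : max;
		} while (cmpxchg(v, old, new) != old);

		return new;
	}
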
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() do {} while(0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do {} while(0)
+#endif
+
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+
+#ifdef CONFIG_UNORDERED_IO
+#define wmb() asm volatile("sfence" ::: "memory")
+#else
+#define wmb() asm volatile("" ::: "memory")
+#endif
+#define read_barrier_depends() do {} while(0)
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
+
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+#define __cli() \
+do { \
+ vcpu_info_t *_vcpu; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ preempt_enable_no_resched(); \
+ barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+ vcpu_info_t *_vcpu; \
+ barrier(); \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ preempt_enable(); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ barrier(); \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ preempt_enable(); \
+ } else \
+ preempt_enable_no_resched(); \
+} while (0)
+
+#define safe_halt() ((void)0)
+
+#define __save_and_cli(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ preempt_enable_no_resched(); \
+ barrier(); \
+} while (0)
+
+void cpu_idle_wait(void);
+
+#define local_irq_save(x) __save_and_cli(x)
+#define local_irq_restore(x) __restore_flags(x)
+#define local_save_flags(x) __save_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#define irqs_disabled() \
+ HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+#define HAVE_EAT_KEY
+void eat_key(void);
+
+#endif
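
Callers use these exactly like the native flag macros, though 'flags' now holds the saved evtchn_upcall_mask rather than an RFLAGS image; a minimal sketch:

	/* Sketch: a short critical section under the Xen event-channel
	 * flags above. local_irq_save() masks event delivery on this vcpu;
	 * the restore unmasks and forces delivery of anything pending. */
	static void update_shared_counter(unsigned long *ctr)
	{
		unsigned long flags;

		local_irq_save(flags);
		(*ctr)++;
		local_irq_restore(flags);
	}
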
--- /dev/null
+#ifndef _ASMi386_TIMER_H
+#define _ASMi386_TIMER_H
+#include <linux/init.h>
+
+/**
+ * struct timer_opts - used to define a timer source
+ *
+ * @name: name of the timer.
+ * @mark_offset: called by the timer interrupt.
+ * @get_offset: called by gettimeofday(). Returns the number of microseconds
+ * since the last timer interrupt.
+ * @monotonic_clock: returns the number of nanoseconds since the init of the
+ * timer.
+ * @delay: delays this many clock cycles.
+ *
+ * The init hook (probes and initializes the timer; takes the clock=
+ * override string as an argument and returns 0 on success, anything else
+ * on failure) lives in struct init_timer_opts below.
+ */
+struct timer_opts {
+ char* name;
+ void (*mark_offset)(void);
+ unsigned long (*get_offset)(void);
+ unsigned long long (*monotonic_clock)(void);
+ void (*delay)(unsigned long);
+};
+
+struct init_timer_opts {
+ int (*init)(char *override);
+ struct timer_opts *opts;
+};
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+extern struct timer_opts* __init select_timer(void);
+extern void clock_fallback(void);
+void setup_pit_timer(void);
+
+/* Modifiers for buggy PIT handling */
+
+extern int pit_latch_buggy;
+
+extern struct timer_opts *cur_timer;
+extern int timer_ack;
+
+/* list of externed timers */
+extern struct timer_opts timer_none;
+extern struct timer_opts timer_pit;
+extern struct init_timer_opts timer_pit_init;
+extern struct init_timer_opts timer_tsc_init;
+#ifdef CONFIG_X86_CYCLONE_TIMER
+extern struct init_timer_opts timer_cyclone_init;
+#endif
+
+extern unsigned long calibrate_tsc(void);
+extern void init_cpu_khz(void);
+#ifdef CONFIG_HPET_TIMER
+extern struct init_timer_opts timer_hpet_init;
+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+extern struct init_timer_opts timer_pmtmr_init;
+#endif
+#endif
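
A hedged sketch of what a timer-source definition looks like against these structures (every 'dummy_*' name is hypothetical):

	static void dummy_mark_offset(void) { }          /* per-tick hook    */
	static unsigned long dummy_get_offset(void)      /* usecs since tick */
	{
		return 0;
	}
	static unsigned long long dummy_monotonic(void)  /* nsecs since init */
	{
		return 0;
	}
	static void dummy_delay(unsigned long loops)
	{
		while (loops--)
			;
	}

	static struct timer_opts timer_dummy = {
		.name            = "dummy",
		.mark_offset     = dummy_mark_offset,
		.get_offset      = dummy_get_offset,
		.monotonic_clock = dummy_monotonic,
		.delay           = dummy_delay,
	};

	/* Probe hook: honour the clock= override string, returning 0 only
	 * when this source should be selected. */
	static int dummy_init(char *override)
	{
		if (override[0] && strncmp(override, "dummy", 5))
			return -ENODEV;
		return 0;
	}

	static struct init_timer_opts timer_dummy_init __initdata = {
		.init = dummy_init,
		.opts = &timer_dummy,
	};
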
--- /dev/null
+#ifndef _X8664_TLBFLUSH_H
+#define _X8664_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb() xen_tlb_flush()
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global() xen_tlb_flush()
+
+
+extern unsigned long pgkern_mask;
+
+#define __flush_tlb_all() __flush_tlb_global()
+
+#define __flush_tlb_one(addr) xen_invlpg(addr)
+
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but x86_64 has somewhat limited TLB-flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* x86_64 does not keep any page table caches in TLB */
+}
+
+#endif /* _X8664_TLBFLUSH_H */
--- /dev/null
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
--- /dev/null
+/*
+ * x86-64 changes / gcc fixes from Andi Kleen.
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ *
+ * This hasn't been optimized for the Hammer yet, but there are likely
+ * no advantages to be gained from x86-64 here anyway.
+ */
+
+typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
+
+/* We don't use gcc to save the XMM registers, because there is no easy way
+   to tell it to do a clts before the register saving. */
+#define XMMS_SAVE do { \
+ preempt_disable(); \
+ if (!(current_thread_info()->status & TS_USEDFPU)) \
+ clts(); \
+ __asm__ __volatile__ ( \
+ "movups %%xmm0,(%1) ;\n\t" \
+ "movups %%xmm1,0x10(%1) ;\n\t" \
+ "movups %%xmm2,0x20(%1) ;\n\t" \
+ "movups %%xmm3,0x30(%1) ;\n\t" \
+ : "=&r" (cr0) \
+ : "r" (xmm_save) \
+ : "memory"); \
+} while(0)
+
+#define XMMS_RESTORE do { \
+ asm volatile ( \
+ "sfence ;\n\t" \
+ "movups (%1),%%xmm0 ;\n\t" \
+ "movups 0x10(%1),%%xmm1 ;\n\t" \
+ "movups 0x20(%1),%%xmm2 ;\n\t" \
+ "movups 0x30(%1),%%xmm3 ;\n\t" \
+ : \
+ : "r" (cr0), "r" (xmm_save) \
+ : "memory"); \
+ if (!(current_thread_info()->status & TS_USEDFPU)) \
+ stts(); \
+ preempt_enable(); \
+} while(0)
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
+#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
+#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
+#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
+#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
+#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
+#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
+#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
+#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned int lines = bytes >> 8;
+ unsigned long cr0;
+ xmm_store_t xmm_save[4];
+
+ XMMS_SAVE;
+
+ asm volatile (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
+ : [inc] "r" (256UL)
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned int lines = bytes >> 8;
+ xmm_store_t xmm_save[4];
+ unsigned long cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] "r" (256UL)
+ : "memory");
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned int lines = bytes >> 8;
+ xmm_store_t xmm_save[4];
+ unsigned long cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p4] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [cnt] "+c" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] "r" (256UL)
+ : "memory" );
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned int lines = bytes >> 8;
+ xmm_store_t xmm_save[4];
+ unsigned long cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ PF4(i) \
+ PF4(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ XO4(i,0) \
+ XO4(i+1,1) \
+ XO4(i+2,2) \
+ XO4(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p4] ;\n"
+ " addq %[inc], %[p5] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [cnt] "+c" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
+ [p5] "+r" (p5)
+ : [inc] "r" (256UL)
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_sse = {
+ .name = "generic_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_sse); \
+ } while (0)
+
+/* We force the use of the SSE xor block because it can write around the
+   L2 cache. We may also be able to load into L1 only, depending on how
+   the CPU deals with a load to a line that is being prefetched. */
+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
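
A sketch of driving the 2-source routine directly (the raid code normally reaches it through xor_speed()/XOR_SELECT_TEMPLATE; buffers are assumed 16-byte aligned and a multiple of 256 bytes long):

	/* Sketch: a[i] ^= b[i] over one 4 KB page using the SSE template. */
	static void xor_one_page(unsigned long *a, unsigned long *b)
	{
		xor_sse_2(4096, a, b);
	}
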
--- /dev/null
+diff -urN linux-2.6.10-orig/include/asm-x86_64/hw_irq.h linux-2.6.10/include/asm-x86_64/hw_irq.h
+--- linux-2.6.10-orig/include/asm-x86_64/hw_irq.h 2005-01-06 00:34:38.000000000 -0500
++++ linux-2.6.10/include/asm-x86_64/hw_irq.h 2005-02-25 17:45:37.181518088 -0500
+@@ -48,6 +48,7 @@
+ *
+ * Vectors 0xf0-0xf9 are free (reserved for future Linux use).
+ */
++#if 0
+ #define SPURIOUS_APIC_VECTOR 0xff
+ #define ERROR_APIC_VECTOR 0xfe
+ #define INVALIDATE_TLB_VECTOR 0xfd
+@@ -57,7 +58,7 @@
+ #define KDB_VECTOR 0xf9
+
+ #define THERMAL_APIC_VECTOR 0xf0
+-
++#endif
+
+ /*
+ * Local APIC timer IRQ vector is on a different priority level,
+diff -urN linux-2.6.10-orig/include/asm-x86_64/irq.h linux-2.6.10/include/asm-x86_64/irq.h
+--- linux-2.6.10-orig/include/asm-x86_64/irq.h 2005-01-06 00:34:38.000000000 -0500
++++ linux-2.6.10/include/asm-x86_64/irq.h 2005-02-25 17:45:37.181518088 -0500
+@@ -10,6 +10,7 @@
+ * <tomsoft@informatik.tu-chemnitz.de>
+ */
+
++#include "irq_vectors.h"
+ #define TIMER_IRQ 0
+
+ /*
+@@ -22,6 +23,7 @@
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
++#if 0
+ /*
+ * The maximum number of vectors supported by x86_64 processors
+ * is limited to 256. For processors other than x86_64, NR_VECTORS
+@@ -38,6 +40,7 @@
+ #define NR_IRQS 224
+ #define NR_IRQ_VECTORS 1024
+ #endif
++#endif
+
+ static __inline__ int irq_canonicalize(int irq)
+ {
+diff -urN linux-2.6.10-orig/include/asm-x86_64/posix_types.h linux-2.6.10/include/asm-x86_64/posix_types.h
+--- linux-2.6.10-orig/include/asm-x86_64/posix_types.h 2004-10-18 17:55:29.000000000 -0400
++++ linux-2.6.10/include/asm-x86_64/posix_types.h 2005-02-25 17:45:37.183517784 -0500
+@@ -6,7 +6,7 @@
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+-
++#ifndef __ASSEMBLY__
+ typedef unsigned long __kernel_ino_t;
+ typedef unsigned int __kernel_mode_t;
+ typedef unsigned long __kernel_nlink_t;
+@@ -115,5 +115,5 @@
+ }
+
+ #endif /* defined(__KERNEL__) */
+-
++#endif
+ #endif